@inproceedings{coloma:caching,
  author    = {Coloma, Kenin and Choudhary, Alok and Liao, Wei-keng and Ward, Lee and Russell, Eric and Pundit, Neil},
  title     = {Scalable High-Level Caching for Parallel {I/O}},
  booktitle = {Proceedings of the International Parallel and Distributed Processing Symposium},
  year      = {2004},
  month     = apr,
  pages     = {96b},
  publisher = {IEEE Computer Society},
  copyright = {(c)2005 IEEE},
  address   = {Santa Fe, NM},
  url       = {http://csdl.computer.org/comp/proceedings/ipdps/2004/2132/01/213210096babs.htm},
  keywords  = {client-side file caching, file locking, MPI, pario-bib},
  abstract  = {In order for I/O systems to achieve high performance in a parallel environment, they must either sacrifice client-side file caching, or keep caching and deal with complex coherency issues. The most common technique for dealing with cache coherency in multi-client file caching environments uses file locks to bypass the client-side cache. Aside from effectively disabling cache usage, file locking is sometimes unavailable on larger systems. \par The high-level abstraction layer of MPI allows us to tackle cache coherency with additional information and coordination without using file locks. By approaching the cache coherency issue further up, the underlying I/O accesses can be modified in such a way as to ensure access to coherent data while satisfying the user's I/O request. We can effectively exploit the benefits of a file system's client-side cache while minimizing its management costs.},
}