BibTeX file for the Third Annual
WORKSHOP ON I/O IN PARALLEL AND DISTRIBUTED SYSTEMS (IOPADS)
April 25, 1995, Red Lion Resort, Santa Barbara, California

Sponsored by: IEEE Technical Committee on Parallel Processing.
Held in conjunction with: International Parallel Processing Symposium (IPPS '95).
In cooperation with: ACM SIGARCH.

(Please leave this header on any copies of this file.)

Updated Fri Jun 2 14:09:45 EDT 1995

@InProceedings{corbett:mpi-overview,
  author = {Peter Corbett and Dror Feitelson and Sam Fineberg and Yarsun Hsu and Bill Nitzberg and Jean-Pierre Prost and Marc Snir and Bernard Traversat and Parkson Wong},
  title = {Overview of the {MPI-IO} Parallel {I/O} Interface},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {1--15},
  URL = {http://lovelace.nas.nasa.gov/MPI-IO/iopads95-paper.ps},
  keyword = {parallel I/O, multiprocessor file system interface, pario bib},
  abstract = {Thanks to MPI, writing portable message passing parallel programs is almost a reality. One of the remaining problems is file I/O. Although parallel file systems support similar interfaces, the lack of a standard makes developing a truly portable program impossible. It is not feasible to develop large scientific applications from scratch for each generation of parallel machine, and, in the scientific world, a program is not considered truly portable unless it not only compiles, but also runs efficiently. The MPI-IO interface is being proposed as an extension to the MPI standard to fill this need. MPI-IO supports a high-level interface to describe the partitioning of file data among processes, a collective interface describing complete transfers of global data structures between process memories and files, asynchronous I/O operations, allowing computation to be overlapped with I/O, and optimization of physical file layout on storage devices (disks).},
  comment = {A more readable explanation of MPI-IO than the proposed-standard document corbett:mpi-io3. See also the slides presented at IOPADS, http://lovelace.nas.nasa.gov/MPI-IO/iopads95-talk.ps}
}
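The corbett:mpi-overview abstract describes MPI-IO's partitioned file views and collective transfers only in prose. The C sketch below is a hedged illustration, not code from the paper: it uses the MPI-2 form of MPI-IO that later grew out of this proposal (MPI_File_open, MPI_File_set_view, MPI_File_write_all), whose call names and details differ from the 1995 draft, and the file name "datafile" is arbitrary. Each process declares a strided view of a shared file and writes its block-cyclic partition with a single collective call.

/* Hedged sketch, not code from the paper: block-cyclic partitioned write
 * using the MPI-2 form of MPI-IO. Each process contributes NBLOCKS blocks
 * of BLOCK doubles; the file view interleaves the blocks of all processes. */
#include <mpi.h>
#include <stdlib.h>

#define BLOCK   1024   /* doubles per block              */
#define NBLOCKS 16     /* blocks written by each process */

int main(int argc, char **argv)
{
    int rank, nprocs, i;
    MPI_File fh;
    MPI_Datatype filetype;
    double *buf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    buf = malloc(NBLOCKS * BLOCK * sizeof(double));
    for (i = 0; i < NBLOCKS * BLOCK; i++)
        buf[i] = (double) rank;

    /* File view: NBLOCKS blocks of BLOCK doubles, strided so that the
     * blocks of the nprocs processes interleave; the displacement shifts
     * each process to its slot in the cycle. */
    MPI_Type_vector(NBLOCKS, BLOCK, nprocs * BLOCK, MPI_DOUBLE, &filetype);
    MPI_Type_commit(&filetype);

    MPI_File_open(MPI_COMM_WORLD, "datafile",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, (MPI_Offset) rank * BLOCK * sizeof(double),
                      MPI_DOUBLE, filetype, "native", MPI_INFO_NULL);

    /* Collective write: all processes transfer their partitions together,
     * letting the I/O layer merge them into large sequential accesses. */
    MPI_File_write_all(fh, buf, NBLOCKS * BLOCK, MPI_DOUBLE,
                       MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Type_free(&filetype);
    free(buf);
    MPI_Finalize();
    return 0;
}

Built and launched with the usual MPI tools (mpicc, mpirun), the collective write gives the I/O layer a complete picture of the interleaved access pattern, which is the same motivation given in the abstract above.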
@InProceedings{baylor:workload,
  author = {Sandra Johnson Baylor and C. Eric Wu},
  title = {Parallel {I/O} Workload Characteristics Using {Vesta}},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {16--29},
  keyword = {parallel I/O, workload characterization, pario bib},
  comment = {They characterize the I/O activity of four parallel applications: sort, matrix multiply, seismic migration, and a video server.}
}

@InProceedings{kalns:video,
  author = {Edgar T. Kalns and Yarsun Hsu},
  title = {Video on Demand Using the {Vesta} Parallel File System},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {30--46},
  URL = {http://web.cps.msu.edu/~kalns/papers/iopads.ps},
  keyword = {parallel I/O, multimedia, multiprocessor file system, pario bib},
  comment = {Hook a video-display system to the compute node of an SP-1 running Vesta, and then use the Vesta file system to serve the video.}
}

@InProceedings{nieuwejaar:strided2,
  author = {Nils Nieuwejaar and David Kotz},
  title = {Low-level Interfaces for High-level Parallel {I/O}},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {47--62},
  URL = {ftp://ftp.cs.dartmouth.edu/pub/CS-techreports/TR95-253.ps.Z},
  keyword = {parallel I/O, multiprocessor file system, pario bib, dfk},
  abstract = {As the I/O needs of parallel scientific applications increase, file systems for multiprocessors are being designed to provide applications with parallel access to multiple disks. Many parallel file systems present applications with a conventional Unix-like interface that allows the application to access multiple disks transparently. By tracing all the activity of a parallel file system in a production, scientific computing environment, we show that many applications exhibit highly regular, but non-consecutive I/O access patterns. Since the conventional interface does not provide an efficient method of describing these patterns, we present three extensions to the interface that support {\em strided}, {\em nested-strided}, and {\em nested-batched} I/O requests. We show how these extensions can be used to express common access patterns.},
  comment = {Identical to revised TR95-253, nieuwejaar:strided2-tr.}
}
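The nieuwejaar:strided2 abstract names strided, nested-strided, and nested-batched requests without showing an interface. The sketch below is hypothetical: the name read_strided and its argument list are illustrative and are not the declarations from the paper. It spells out the semantics of a simple strided read in terms of standard POSIX pread; the benefit argued in the paper comes from handing the whole pattern to the file system in one request so it can schedule all of the accesses itself, rather than issuing them one at a time as this portable fallback does.

#include <sys/types.h>
#include <unistd.h>

/* Hypothetical simple-strided read: fetch `count` records of `quantity`
 * bytes, `file_stride` bytes apart in the file starting at the current
 * offset, and place them `mem_stride` bytes apart in `buf`.
 * Portable fallback semantics only; short reads are counted, not retried. */
ssize_t read_strided(int fd, void *buf, size_t quantity, size_t count,
                     off_t file_stride, size_t mem_stride)
{
    off_t   start = lseek(fd, 0, SEEK_CUR);   /* pattern begins here */
    ssize_t total = 0;

    for (size_t i = 0; i < count; i++) {
        ssize_t n = pread(fd, (char *) buf + i * mem_stride, quantity,
                          start + (off_t) i * file_stride);
        if (n < 0)
            return -1;
        total += n;
    }
    return total;
}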
@InProceedings{schloss:hcsa,
  author = {Gary Schloss and Michael Vernick},
  title = {{HCSA:} A Hybrid Client-Server Architecture},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {63--77},
  keyword = {parallel I/O, pario bib},
  comment = {In the context of client-server database systems, they propose a compromise between shared-disk architectures, where the disks are all attached to the network and all machines are both clients and servers, and a system where the disks are attached to a single server. Their compromise attaches the disks to both the network and the server.}
}

@InProceedings{kotz:explore,
  author = {David Kotz and Ting Cai},
  title = {Exploring the use of {I/O} Nodes for Computation in a {MIMD} Multiprocessor},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {78--89},
  URL = {ftp://ftp.cs.dartmouth.edu/pub/CS-papers/Kotz/kotz:explore.ps.Z},
  keyword = {parallel I/O, multiprocessor file system, dfk, pario bib},
  abstract = {As parallel systems move into the production scientific-computing world, the emphasis will be on cost-effective solutions that provide high throughput for a mix of applications. Cost-effective solutions demand that a system make effective use of all of its resources. Many MIMD multiprocessors today, however, distinguish between ``compute'' and ``I/O'' nodes, the latter having attached disks and being dedicated to running the file-system server. This static division of responsibilities simplifies system management but does not necessarily lead to the best performance in workloads that need a different balance of computation and I/O. Of course, computational processes sharing a node with a file-system service may receive less CPU time, network bandwidth, and memory bandwidth than they would on a computation-only node. In this paper we begin to examine this issue experimentally. We found that high-performance I/O does not necessarily require substantial CPU time, leaving plenty of time for application computation. There were some complex file-system requests, however, which left little CPU time available to the application. (The impact on network and memory bandwidth still needs to be determined.) For applications (or users) that cannot tolerate an occasional interruption, we recommend that they continue to use only compute nodes. For tolerant applications needing more cycles than those provided by the compute nodes, we recommend that they take full advantage of {\em both\/} compute and I/O nodes for computation, and that operating systems should make this possible.}
}

@InProceedings{moyer:scalable,
  author = {Steven A. Moyer and V. S. Sunderam},
  title = {Scalable Concurrency Control for Parallel File Systems},
  booktitle = {IPPS~'95 Workshop on Input/Output in Parallel and Distributed Systems},
  year = {1995},
  month = {April},
  pages = {90--106},
  keyword = {parallel I/O, pario bib},
  abstract = {Parallel file systems employ data declustering to increase I/O throughput. As a result, a single read or write operation can generate concurrent data accesses on multiple storage devices. Unless a concurrency control mechanism is employed, familiar file access semantics are likely to be violated. This paper details the transaction-based concurrency control mechanism implemented in the PIOUS parallel file system. Performance results are presented demonstrating that sequential consistency semantics can be provided without loss of system scalability.},
  comment = {Based on, or perhaps identical to, CSTR-950202.}
}