@InProceedings{hartman:zebra2,
  author    = {John H. Hartman and John K. Ousterhout},
  title     = {The {Zebra} Striped Network File System},
  booktitle = {Proceedings of the Fourteenth ACM Symposium on Operating Systems Principles},
  year      = {1993},
  pages     = {29--43},
  publisher = {ACM Press},
  address   = {Asheville, NC},
  earlier   = {hartman:zebra},
  later     = {hartman:zebra3},
  keywords  = {file system, disk striping, distributed file system, RAID, log-structured file system, parallel I/O, pario-bib},
  comment   = {Zebra stripes across network servers, but not on a file-by-file basis. Instead they use LFS ideas to stripe a per-client log across all file servers. Each client can then compute a parity block for each stripe that it writes. They store ``deltas'' (changes in block locations) with the data, and also send them to the (central) file manager. The file manager and stripe cleaner are the key state managers, keeping track of where blocks are located and of stripe utilizations. Performance numbers are limited to small-scale tests. This paper has more detail than hartman:zebra, and includes performance numbers (but not with real workloads or the stripe cleaner). Some tricky consistency issues.}
}