@inproceedings{krieger:hfs,
  author       = {Krieger, Orran and Stumm, Michael},
  title        = {{HFS:} A Flexible File System for Large-Scale Multiprocessors},
  booktitle    = {Proceedings of the 1993 {DAGS/PC} Symposium},
  year         = {1993},
  month        = jun,
  pages        = {6--14},
  organization = {Dartmouth Institute for Advanced Graduate Studies},
  address      = {Hanover, NH},
  later        = {krieger:hfs2},
  url          = {ftp://ftp.cs.toronto.edu/pub/parallel/Krieger_Stumm_DAGS93.ps.Z},
  keywords     = {multiprocessor file system, parallel I/O, operating system, shared memory, pario-bib},
  abstract     = {The {H{\sc urricane}} File System (HFS) is a new file system being developed for large-scale shared memory multiprocessors with distributed disks. The main goal of this file system is scalability; that is, the file system is designed to handle demands that are expected to grow linearly with the number of processors in the system. To achieve this goal, HFS is designed using a new structuring technique called Hierarchical Clustering. HFS is also designed to be flexible in supporting a variety of policies for managing file data and for managing file system state. This flexibility is necessary to support in a scalable fashion the diverse workloads we expect for a multiprocessor file system.},
  comment      = {This paper is now out of date; see krieger:thesis. Designed for scalability on the hierarchical clustering model (see unrau:cluster), the Hurricane File System for NUMA shared-memory MIMD machines. Each cluster has its own full file system, which communicates with those in other clusters. Pieces are name server, open-file server, and block-file server. On first access, the file is mapped into the application space. VM system calls BFS to arrange transfers. Open questions: policies for file state management, block distribution, caching, and prefetching. Object-oriented approach used to allow for flexibility and extendability. Local disk file systems are log-structured.},
}