@techreport{vitter:prefetch,
  author      = {Vitter, Jeffrey Scott and Krishnan, P.},
  title       = {Optimal Prefetching via Data Compression},
  institution = {Brown University},
  number      = {CS--91--46},
  year        = {1991},
  month       = jul,
  note        = {A summary appears in FOCS '91},
  later       = {vitter:jprefetch},
  url         = {ftp://ftp.cs.brown.edu/pub/techreports/91/cs91-46.ps.Z},
  keywords    = {parallel I/O algorithms, disk prefetching, pario-bib},
  abstract    = {Caching and prefetching are important mechanisms for speeding up access time to data on secondary storage. Recent work in competitive online algorithms has uncovered several promising new algorithms for caching. In this paper, we apply a form of the competitive philosophy for the first time to the problem of prefetching to develop an optimal universal prefetcher in terms of fault ratio, with particular applications to large-scale databases and hypertext systems. Our algorithms for prefetching are novel in that they are based on data compression techniques that are both theoretically optimal and good in practice. Intuitively, in order to compress data effectively, you have to be able to predict future data well, and thus good data compressors should be able to predict well for purposes of prefetching. We show for powerful models such as Markov sources and $m$th order Markov sources that the page fault rates incurred by our prefetching algorithms are optimal in the limit for almost all sequences of page accesses.},
  comment     = {``This... is on prefetching, but I think the ideas will have a lot of use with parallel disks. The implementations we have now are doing amazingly well compared to LRU.'' [Vitter]. See vitter:jprefetch.},
}