Skip to content

HTTPS clone URL

Subversion checkout URL

You can clone with
or
.
Download ZIP
Browse files

fill related work

  • Loading branch information...
commit 3f56954d9625e89471b0553e84620c5ca67bb0ce 1 parent dda8940
Ming authored
View
78 doc/references.bib
@@ -255,6 +255,50 @@ @inproceedings{kvworkload_sigmetrics
workload modeling},
}
+@MISC{flashcache,
+ TITLE = "{A Write Back Block Cache for Linux}",
+ NOTE = "\url{https://github.com/facebook/flashcache/}",
+ KEY = "Flashcache"
+}
+
+@MISC{bcache,
+ TITLE = "{A Linux kernel block layer cache}",
+ NOTE = "\url{http://bcache.evilpiepirate.org/}",
+ KEY = "bcache"
+}
+
+@inproceedings{eurosys_12_flashtier,
+ author = "M. Saxena and M. M. Swift and Y. Zhang",
+ title = "{FlashTier: a Lightweight, Consistent and Durable Storage
+ Cache}",
+ booktitle = "Proceedings of the 7th ACM European Conference on Computer
+ Systems",
+ series = {EuroSys '12},
+ year = 2012,
+ location = {Bern, Switzerland},
+ pages = {267--280}
+}
+
+@inproceedings{flashvm,
+ author = "M. Saxena and M. M. Swift",
+ title = {FlashVM: Revisiting the Virtual Memory Hierarchy},
+ BOOKTITLE = "Proceedings of the 12th Workshop on Hot Topics in
+ Operating Systems",
+ year = 2009,
+}
+
+@INPROCEEDINGS{socc11chisl,
+ AUTHOR = "R. P. Spillane and P. J. Shetty and E. Zadok and S. Archak
+ and S. Dixit",
+ TITLE = "An Efficient Multi-Tier Tablet Server Storage
+ Architecture",
+ BOOKTITLE = "Proceedings of the 2nd ACM Symposium on Cloud Computing
+ (SOCC'11)",
+ YEAR = "2011",
+ MONTH = "October",
+ ADDRESS = "Cascais, Portugal",
+}
+
@article{umbrellafs_gos,
author = {Garrison, John A. and Reddy, A. L. Narasimha},
title = {Umbrella file system: Storage management across heterogeneous
@@ -280,7 +324,8 @@ @article{umbrellafs_gos
@techreport{zhang2012multi,
title={Multi-level Hybrid Cache: Impact and Feasibility},
- author={Zhang, Z. and Kim, Y. and Ma, X. and Shipman, G. and Zhou, Y.}, year={2012},
+ author={Zhang, Z. and Kim, Y. and Ma, X. and Shipman, G. and Zhou, Y.},
+ year={2012},
institution={Oak Ridge National Laboratory (ORNL)}
}
@@ -419,3 +464,34 @@ @MISC{memcached
NOTE = "\url{http://memcached.org}",
KEY = "Memcached"
}
+
+@INPROCEEDINGS{sosp09fawn,
+ AUTHOR = "D. G. Andersen and J. Franklin and M. Kaminsky and
+ A. Phanishayee and L. Tan and V. Vasudevan",
+ TITLE = "{FAWN: A Fast Array of Wimpy Nodes}",
+ BOOKTITLE = "Proceedings of the 22nd ACM Symposium on Operating Systems
+ Principles (SOSP '2009)",
+ PUBLISHER = "ACM SIGOPS",
+ MONTH = "October",
+ YEAR = 2009,
+ PAGES = "1--14",
+}
+
+@article{vldb_flashup,
+ author = {Koltsidas, Ioannis and Viglas, Stratis D.},
+ title = {Flashing up the storage layer},
+ journal = {Proc. VLDB Endow.},
+ issue_date = {August 2008},
+ volume = {1},
+ number = {1},
+ month = aug,
+ year = {2008},
+ issn = {2150-8097},
+ pages = {514--525},
+ numpages = {12},
+ url = {http://dx.doi.org/10.1145/1453856.1453913},
+ doi = {10.1145/1453856.1453913},
+ acmid = {1453913},
+ publisher = {VLDB Endowment},
+}
+
View
4 doc/report/eval.tex
@@ -391,8 +391,8 @@ \subsection{MRIS Read}
iostat. The results are shown in Figure~\ref{fig:mrisiostat}. We
observed that the throughput reported by iostat is much larger than
that in Figure~\ref{fig:mrisopssec}. Three factors contribute to the
-extra throughput: 1) the read-ahead in the file system, 2) extra read
-of file system metadata, and 3) extra read of database metadata.
+extra throughput: 1) the read-ahead in the filesystem, 2) extra read
+of filesystem metadata, and 3) extra read of database metadata.
However, it is interesting to notice that the data read from SSD for
the SSD setup is quite stable even when ratio varies dramatically.
This is something we need to further investigate.
View
18 doc/report/intro.tex
@@ -29,10 +29,20 @@ \section{Introduction}
popular out-of-place update optimization, as in log-structured
filesystems, by turning small in-place updates to revision logs as
metadata and compacting them into large batched I/O to bottom tiers.
-We implemented a size-tiered object storage system optimized for
-multi-resolution images, named MRIS (Multi-Resolution Image Store).
-MRIS aims at storing large amount of images, as well as their metadata
-and smaller versions (such as thumbnails), efficiently.
+
+Since multimedia files are primarily accessed sequentially, it may not
+be necessary to provide for efficient random access to every large
+file \cite{evans2002study}. Facebook researchers also argued that it
+may even be worthwhile to investigate not caching large objects in the
+memory at all, to increase overall cache hit rates
+\cite{kvworkload_sigmetrics}. We implemented a size-tiered object
+storage system optimized for multi-resolution images, named MRIS
+(Multi-Resolution Image Store). MRIS aims at storing large amounts of
+images, as well as their metadata and smaller versions (such as
+thumbnails), efficiently. However, the strategies employed in MRIS can
+be applied to other storage and web-serving systems as well because
+there also exist salient size characteristics following power-law
+distributions~\cite{kvworkload_sigmetrics}.
%We plan to use KVDB, which can be aware of multi-tier storage
%techniques as indicated in Rick's SOCC11 paper (GTSSL), as the
View
77 doc/report/main.aux
@@ -1,16 +1,19 @@
\relax
+\citation{evans2002study}
+\citation{kvworkload_sigmetrics}
+\citation{kvworkload_sigmetrics}
\citation{leveldb-web}
\citation{chang06osdi}
\@writefile{toc}{\contentsline {section}{\numberline {1}Introduction}{1}}
\newlabel{intro}{{1}{1}}
\@writefile{lof}{\contentsline {figure}{\numberline {1}{\ignorespaces SSTable}}{1}}
\newlabel{fig:sstable}{{1}{1}}
-\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces LevelDB Compaction}}{1}}
-\newlabel{fig:compact}{{2}{1}}
+\@writefile{toc}{\contentsline {section}{\numberline {2}Implementation}{1}}
+\newlabel{sec:implementation}{{2}{1}}
\citation{lsm}
\citation{level_lifetime}
-\@writefile{toc}{\contentsline {section}{\numberline {2}Implementation}{2}}
-\newlabel{sec:implementation}{{2}{2}}
+\@writefile{lof}{\contentsline {figure}{\numberline {2}{\ignorespaces LevelDB Compaction}}{2}}
+\newlabel{fig:compact}{{2}{2}}
\@writefile{lof}{\contentsline {figure}{\numberline {3}{\ignorespaces Large Space}}{2}}
\newlabel{fig:space}{{3}{2}}
\citation{filebench-web}
@@ -42,48 +45,66 @@
\@writefile{lot}{\contentsline {table}{\numberline {2}{\ignorespaces Speedup of SSD and Hybrid over SATA (ops/sec).}}{5}}
\newlabel{tbl:speedup}{{2}{5}}
\newlabel{eqn:ssdops}{{1}{5}}
-\newlabel{eqn:sataops}{{2}{5}}
-\newlabel{eqn:hybridops}{{3}{5}}
-\citation{eurosys_hfs}
\@writefile{lot}{\contentsline {table}{\numberline {3}{\ignorespaces Costs of read operations in time ($\mu $s). For instance, $t_{SF}$ is the time of reading a Small image from the Flash SSD.}}{6}}
\newlabel{tbl:variable}{{3}{6}}
\@writefile{lof}{\contentsline {figure}{\numberline {10}{\ignorespaces Modeled and benchmarked performance (ops/sec).}}{6}}
\newlabel{fig:opspred}{{10}{6}}
+\newlabel{eqn:sataops}{{2}{6}}
+\newlabel{eqn:hybridops}{{3}{6}}
+\newlabel{eqn:opsize}{{4}{6}}
+\newlabel{eqn:hybridthput}{{5}{6}}
\@writefile{lof}{\contentsline {figure}{\numberline {11}{\ignorespaces MRIS Read Performance (mb/sec).}}{6}}
\newlabel{fig:mrismbsec}{{11}{6}}
-\newlabel{eqn:opsize}{{4}{6}}
\@writefile{lof}{\contentsline {figure}{\numberline {12}{\ignorespaces Predicted and benchmarked read performance (mb/sec).}}{6}}
\newlabel{fig:thputpred}{{12}{6}}
\@writefile{lot}{\contentsline {table}{\numberline {4}{\ignorespaces Speedup of SSD and Hybrid over SATA (mb/sec).}}{6}}
\newlabel{tbl:spdupmb}{{4}{6}}
-\newlabel{eqn:hybridthput}{{5}{6}}
+\citation{eurosys_hfs}
+\citation{conquest_tos}
+\citation{umbrellafs_gos}
\citation{tablefs}
-\citation{Seltzer09hfad}
-\citation{evans2002study}
-\citation{kvworkload_sigmetrics}
-\citation{kvworkload_sigmetrics}
+\citation{socc11chisl}
+\citation{vldb_flashup}
+\citation{sosp09fawn}
+\citation{zhang2012multi}
+\citation{flashvm}
+\citation{eurosys_12_flashtier}
+\citation{flashcache}
+\citation{bcache}
\citation{Forney2002fast}
\bibstyle{plain}
\bibdata{../references}
-\bibcite{kvworkload_sigmetrics}{1}
-\bibcite{beaver2010finding}{2}
-\bibcite{chang06osdi}{3}
-\bibcite{evans2002study}{4}
-\bibcite{filebench-web}{5}
-\bibcite{Forney2002fast}{6}
-\bibcite{leveldb-web}{7}
-\bibcite{memcached}{8}
-\bibcite{lsm}{9}
-\bibcite{tablefs}{10}
+\bibcite{sosp09fawn}{1}
+\bibcite{kvworkload_sigmetrics}{2}
+\bibcite{bcache}{3}
+\bibcite{beaver2010finding}{4}
+\bibcite{chang06osdi}{5}
+\bibcite{evans2002study}{6}
\@writefile{lof}{\contentsline {figure}{\numberline {13}{\ignorespaces MRIS Read Performance (mb/sec) by iostat}}{7}}
\newlabel{fig:mrisiostat}{{13}{7}}
\@writefile{toc}{\contentsline {section}{\numberline {4}Related Work}{7}}
\newlabel{sec:related}{{4}{7}}
+\@writefile{toc}{\contentsline {paragraph}{(1) Hybrid Filesystems.}{7}}
+\@writefile{toc}{\contentsline {paragraph}{(2) Multi-tier Storage.}{7}}
+\@writefile{toc}{\contentsline {paragraph}{(3) Multi-level Caching.}{7}}
\@writefile{toc}{\contentsline {section}{\numberline {5}Conclusions}{7}}
\newlabel{sec:conc}{{5}{7}}
\@writefile{toc}{\contentsline {subsection}{\numberline {5.1}Future work}{7}}
-\bibcite{Seltzer09hfad}{11}
-\bibcite{level_lifetime}{12}
-\bibcite{wikimedia-foundation}{13}
-\bibcite{wikipedia-web}{14}
-\bibcite{eurosys_hfs}{15}
+\bibcite{filebench-web}{7}
+\bibcite{flashcache}{8}
+\bibcite{Forney2002fast}{9}
+\bibcite{umbrellafs_gos}{10}
+\bibcite{vldb_flashup}{11}
+\bibcite{leveldb-web}{12}
+\bibcite{memcached}{13}
+\bibcite{lsm}{14}
+\bibcite{tablefs}{15}
+\bibcite{flashvm}{16}
+\bibcite{eurosys_12_flashtier}{17}
+\bibcite{level_lifetime}{18}
+\bibcite{socc11chisl}{19}
+\bibcite{conquest_tos}{20}
+\bibcite{wikimedia-foundation}{21}
+\bibcite{wikipedia-web}{22}
+\bibcite{zhang2012multi}{23}
+\bibcite{eurosys_hfs}{24}
View
64 doc/report/main.blg
@@ -2,44 +2,44 @@ This is BibTeX, Version 0.99c (TeX Live 2009/Debian)
The top-level auxiliary file: main.aux
The style file: plain.bst
Database file #1: ../references.bib
-You've used 15 entries,
+You've used 24 entries,
2118 wiz_defined-function locations,
- 585 strings with 6664 characters,
-and the built_in function-call counts, 4655 in all, are:
-= -- 462
-> -- 214
-< -- 5
-+ -- 89
-- -- 72
-* -- 283
-:= -- 726
-add.period$ -- 47
-call.type$ -- 15
-change.case$ -- 85
+ 628 strings with 8096 characters,
+and the built_in function-call counts, 7632 in all, are:
+= -- 742
+> -- 372
+< -- 6
++ -- 154
+- -- 126
+* -- 499
+:= -- 1201
+add.period$ -- 72
+call.type$ -- 24
+change.case$ -- 142
chr.to.int$ -- 0
-cite$ -- 15
-duplicate$ -- 184
-empty$ -- 395
-format.name$ -- 72
-if$ -- 1024
+cite$ -- 24
+duplicate$ -- 290
+empty$ -- 636
+format.name$ -- 126
+if$ -- 1647
int.to.chr$ -- 0
-int.to.str$ -- 15
-missing$ -- 8
-newline$ -- 74
-num.names$ -- 20
-pop$ -- 121
+int.to.str$ -- 24
+missing$ -- 14
+newline$ -- 117
+num.names$ -- 34
+pop$ -- 193
preamble$ -- 1
-purify$ -- 70
+purify$ -- 117
quote$ -- 0
-skip$ -- 139
+skip$ -- 214
stack$ -- 0
-substring$ -- 198
-swap$ -- 44
-text.length$ -- 5
+substring$ -- 358
+swap$ -- 64
+text.length$ -- 6
text.prefix$ -- 0
top$ -- 0
-type$ -- 60
+type$ -- 96
warning$ -- 0
-while$ -- 34
-width$ -- 17
-write$ -- 161
+while$ -- 57
+width$ -- 26
+write$ -- 250
View
BIN  doc/report/main.dvi
Binary file not shown
View
58 doc/report/main.log
@@ -1,4 +1,4 @@
-This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=latex 2011.6.28) 14 DEC 2012 14:15
+This is pdfTeX, Version 3.1415926-1.40.10 (TeX Live 2009/Debian) (format=latex 2011.6.28) 14 DEC 2012 17:30
entering extended mode
%&-line parsing enabled.
**main.tex
@@ -500,16 +500,12 @@ Underfull \hbox (badness 3068) in paragraph at lines 20--32
\OT1/ptm/m/n/10 Lev-elDB is log-structured and or-ga-nizes data into
[]
-
-Underfull \vbox (badness 10000) has occurred while \output is active []
-
- [1
+[1
]
LaTeX Font Info: Try loading font information for OT1+pcr on input line 41.
-
-(/usr/share/texmf-texlive/tex/latex/psnfss/ot1pcr.fd
+ (/usr/share/texmf-texlive/tex/latex/psnfss/ot1pcr.fd
File: ot1pcr.fd 2001/06/04 font definitions for OT1/pcr.
)
File: figures/large-space.eps Graphic file (type eps)
@@ -523,10 +519,10 @@ Underfull \hbox (badness 7221) in paragraph at lines 138--147
) (./eval.tex
File: figures/ssd_vs_sata_read.eps Graphic file (type eps)
- <figures/ssd_vs_sata_read.eps>
+ <figures/ssd_vs_sata_read.eps> [3]
File: figures/ssd_vs_sata_write.eps Graphic file (type eps)
- <figures/ssd_vs_sata_write.eps>
-[3]
+
+<figures/ssd_vs_sata_write.eps>
File: figures/wiki-image.eps Graphic file (type eps)
<figures/wiki-image.eps> [4]
LaTeX Font Info: Font shape `OT1/ptm/bx/n' in size <7> not available
@@ -536,12 +532,16 @@ LaTeX Font Info: Font shape `OT1/ptm/bx/it' in size <10> not available
LaTeX Font Info: Font shape `OT1/ptm/bx/it' in size <9> not available
(Font) Font shape `OT1/ptm/b/it' tried instead on input line 176.
File: figures/mris-write-ops.eps Graphic file (type eps)
- <figures/mris-write-ops.eps>
-File: figures/mris-write-thput.eps Graphic file (type eps)
-<figures/mris-write-thput.eps>
+<figures/mris-write-ops.eps>
+File: figures/mris-write-thput.eps Graphic file (type eps)
+ <figures/mris-write-thput.eps>
File: figures/mris_ratio_ops.eps Graphic file (type eps)
- <figures/mris_ratio_ops.eps> [5]
+
+<figures/mris_ratio_ops.eps>
+Underfull \vbox (badness 1558) has occurred while \output is active []
+
+ [5]
File: figures/ratio_ops_predict.eps Graphic file (type eps)
<figures/ratio_ops_predict.eps>
@@ -549,39 +549,29 @@ File: figures/mris_ratio_thput.eps Graphic file (type eps)
<figures/mris_ratio_thput.eps>
File: figures/ratio_thput_predict.eps Graphic file (type eps)
-<figures/ratio_thput_predict.eps>
+<figures/ratio_thput_predict.eps> [6]
File: figures/mris_ratio_iostat_thput.eps Graphic file (type eps)
<figures/mris_ratio_iostat_thput.eps>)
-(./related.tex
-Underfull \vbox (badness 2197) has occurred while \output is active []
-
- [6]
-Missing character: There is no � in font ptmr7t!
-Missing character: There is no � in font ptmr7t!
-Missing character: There is no � in font ptmr7t!
-)
-(./conclusion.tex) (./main.bbl [7]
-Underfull \hbox (badness 10000) in paragraph at lines 82--87
+(./related.tex) (./conclusion.tex) (./main.bbl [7]
+Underfull \hbox (badness 10000) in paragraph at lines 114--119
\OT1/ptm/m/n/10 life-time. $\OT1/ptm/m/sl/8 https : / / groups . google . com
/ d / topic / leveldb /
[]
-Underfull \hbox (badness 10000) in paragraph at lines 94--96
+Underfull \hbox (badness 10000) in paragraph at lines 138--140
[]\OT1/ptm/m/n/10 Wikipedia. $\OT1/ptm/m/sl/8 http : / / wikimediafoundation .
org / wiki /
[]
-) [8
-
-] (./main.aux) )
+) [8] (./main.aux) )
Here is how much of TeX's memory you used:
- 9183 strings out of 495062
- 153726 string characters out of 1182644
- 211134 words of memory out of 3000000
- 12093 multiletter control sequences out of 15000+50000
+ 9192 strings out of 495062
+ 153847 string characters out of 1182644
+ 211170 words of memory out of 3000000
+ 12102 multiletter control sequences out of 15000+50000
20631 words of font info for 64 fonts, out of 3000000 for 9000
29 hyphenation exceptions out of 8191
54i,11n,51p,376b,345s stack positions out of 5000i,500n,10000p,200000b,50000s
-Output written on main.dvi (8 pages, 53236 bytes).
+Output written on main.dvi (8 pages, 56532 bytes).
View
108 doc/report/related.tex
@@ -1,53 +1,83 @@
\section{Related Work}
\label{sec:related}
+Our work of optimizing the performance of a key/value store using hybrid
+storage devices is related to (1) hybrid filesystems, (2) multi-tier
+storage, and (3) multi-level caching.
-hFS~\cite{eurosys_hfs} also treats data differently based on their size and
-type. Metadata and small files are stored separately in a log partition like
-log-structured filesystem; data blocks of large regular files are stored in a
-partition in a FFS-like fashion. Our work provides a different user interface,
-which is an object store instead of a POSIX filesystem. Our work also focus
-more on different block storage techniques, which is under the filesystem
-layer.
+\paragraph{(1) Hybrid Filesystems}
-%\cite{fast_12_deindirection} also treats metadata and data differently, and
-%store them in virtual blocks and physical blocks respectively.
-
-TableFS \cite{tablefs} also proposed to use NoSQL store metadata and small
-files. However, TableFS is filesystem, which exposes POSIX interface to users,
-whereas our store is an object store that not restricted by the POSIX
-interface. They do not privide extra functionalities like transaction and
-journaling and they also did not consider using hybrid stroage media in their
-work.
+hFS~\cite{eurosys_hfs} is a hybrid filesystem which treats data
+differently based on their size and type. Metadata and small files are
+stored separately in a log partition like log-structured filesystem;
+data blocks of large regular files are stored in a partition in a
+FFS-like fashion. Similar to hFS, Conquest~\cite{conquest_tos} uses
+battery-backed RAM to hold metadata and small files. Only large files
+go to disk. Unlike hFS, UmbrellaFS~\cite{umbrellafs_gos} is a hybrid
+stackable filesystem that sits below the VFS but above general filesystems
+such as Ext2. UmbrellaFS is able to use different devices including SSD.
+TableFS \cite{tablefs} uses NoSQL store for metadata and small files.
+However, its main objective is to improve the performance of a
+filesystem using NoSQL store.
-hFAD \cite{Seltzer09hfad} described an tag-based object store API that supports
-full text search and it focused more on user interface. As our work is also
-able to provide tagging and text searching of metadata, we are more focused on
-workload specific performance optimization. Moreover, the design of hFAD is
-above block level storage, whereas our work integrate hybrid block levle
-stroage techniques into our storage system.
+Whereas all of them integrate hybrid techniques into the filesystem
+layer, our system lies in the application layer which is above the
+filesystem layer. It optimizes the operations of an object store,
+which provides a different interface from the POSIX filesystem
+interface. This is an important difference because the filesystem
+interface lacks application-level knowledge, which is very useful in
+optimizing application performance.
-Since multimedia files are primarily accessed sequentially, it may not be
-necessary to provide for efficient random access to every large file.
-\cite{evans2002study}.
+\paragraph{(2) Multi-tier Storage}
+%
+GTSSL~\cite{socc11chisl}
-\cite{kvworkload_sigmetrics} strong locality metrics, such as keys accessed many
-millions of times a day, do not always suf- fice for a high hit rate; and there
-is still room for efficiency and hit rate improvements in Memcached’s
-implementation. We found that the salient size characteristics follow
-power-law distributions, sim- ilar to other storage and Web-serving systems
+Flashup~\cite{vldb_flashup}
-Why not cache in block level? Because block layer lacks the knowledge of
-objects and files.
+FAWN~\cite{sosp09fawn}
-It may even be worthwhile to investigate not caching large objects at all, to
-increase overall hit rates \cite{kvworkload_sigmetrics}.
+\paragraph{(3) Multi-level Caching}
+%
+Storage-class memory such as Flash fills the gap between DRAM and HDD
+in terms of cost, capacity, and performance. It can be considered
+either as backup for DRAM in the virtual memory layer or cache for HDD
+in the block layer. Zhang et al.~\cite{zhang2012multi} and Saxena et
+al.~\cite{flashvm} consider using Flash as backup of DRAM for paging,
+whereas FlashTier~\cite{eurosys_12_flashtier},
+FlashCache~\cite{flashcache} and Bcache~\cite{bcache} use Flash as
+block level cache. Our work resides in neither the virtual memory nor
+the block layer. It is agnostic to all the above-mentioned techniques.
Forney et al. \cite{Forney2002fast} proposed storage aware caching for
-heterogeneous storage systems. They made memory cache aware of the different
-replacement costs and partitioned the cache for different storage devices.
-However, their study is set in a different context which is a network-attached
-disk system. They did not consider data placement among different drives,
- which is an important strategy employed by our study.
+heterogeneous storage systems. They made memory cache aware of the
+different replacement costs and partitioned the cache for different
+storage devices. However, their study is set in a different context
+which is a network-attached disk system. They did not consider data
+placement among different drives, which is an important strategy of
+our study.
+
+%\cite{fast_12_deindirection} also treats metadata and data differently, and
+%store them in virtual blocks and physical blocks respectively.
+
+%hFAD \cite{Seltzer09hfad} described an tag-based object store API that
+%supports full text search and it focused more on user interface. As
+%our work is also able to provide tagging and text searching of
+%metadata, we are more focused on workload specific performance
+%optimization. Moreover, the design of hFAD is above block level
+%storage, whereas our work integrate hybrid block level stroage
+%techniques into our storage system.
+
+%\cite{kvworkload_sigmetrics} strong locality metrics, such as keys
+%accessed many millions of times a day, do not always suffice for a
+%high hit rate; and there is still room for efficiency and hit rate
+%improvements in Memcached’s implementation. We found that the salient
+%size characteristics follow power-law distributions, similar to
+%other storage and Web-serving systems
+
+%Why not cache in block level? Because block layer lacks the knowledge of
+%objects and files.
+
+%It may even be worthwhile to investigate not caching large objects at
+%all, to increase overall hit rates \cite{kvworkload_sigmetrics}.
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
%% For Emacs:

0 comments on commit 3f56954

Please sign in to comment.
Something went wrong with that request. Please try again.