use FileContent;
use bazel_protos;
use boxfuture::{try_future, BoxFuture, Boxable};
use bytes::Bytes;
use dirs;
use futures::{future, Future};
use hashing::Digest;
use protobuf::Message;
use serde_derive::Serialize;
use std::collections::HashMap;
use std::fs::OpenOptions;
use std::io::Write;
use std::os::unix::fs::OpenOptionsExt;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;
use parking_lot::Mutex;
use pool::ResettablePool;
// This is the maximum size any particular local LMDB store file is allowed to grow to.
// It doesn't reflect space allocated on disk, or RAM allocated (it may be reflected in VIRT but
// not RSS). There is no practical upper bound on this number, so we set it ridiculously high:
// about 100 GiB per store file.
const MAX_LOCAL_STORE_SIZE_BYTES: usize = 1024 * 1024 * 1024 * 1024 / 10;
// This is the target number of bytes which should be present in all combined LMDB store files
// after garbage collection. We almost certainly want to make this configurable.
pub const DEFAULT_LOCAL_STORE_GC_TARGET_BYTES: usize = 4 * 1024 * 1024 * 1024;
/// Summary of the files and directories uploaded with an operation.
///
/// ingested_file_{count, bytes}: Number and combined size of processed files.
/// uploaded_file_{count, bytes}: Number and combined size of files uploaded to the remote.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, Serialize)]
pub struct UploadSummary {
ingested_file_count: usize,
ingested_file_bytes: usize,
uploaded_file_count: usize,
uploaded_file_bytes: usize,
}
///
/// A content-addressed store of file contents, and Directories.
///
/// Store keeps content on disk, and can optionally backfill its on-disk storage by
/// fetching files from a remote server which implements the gRPC bytestream interface
/// (see https://github.com/googleapis/googleapis/blob/master/google/bytestream/bytestream.proto)
/// as specified by the gRPC remote execution interface (see
/// https://github.com/googleapis/googleapis/blob/master/google/devtools/remoteexecution/v1test/)
///
/// It can also write back to a remote gRPC server, but will only do so when explicitly
/// instructed to.
///
#[derive(Clone)]
pub struct Store {
local: local::ByteStore,
remote: Option<remote::ByteStore>,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ShrinkBehavior {
///
/// Free up space in the store for future writes (marking pages as dirty), but don't proactively
/// free the disk space that was used. This is fast and safe, but the store files won't shrink
/// on disk.
///
Fast,
///
/// As with Fast, but also free up disk space from no-longer-used data. This may use extra disk
/// space temporarily while compaction is happening.
///
/// Note that any processes which have the Store open may need to re-open the Store after this
/// operation, as the underlying files may have been re-written.
///
Compact,
}
// Note that Store doesn't implement ByteStore because it operates at a higher level of abstraction,
// considering Directories as a standalone concept, rather than a buffer of bytes.
// This has the nice property that Directories can be trusted to be valid and canonical.
// We may want to re-visit this if we end up wanting to handle local/remote/merged interchangeably.
impl Store {
///
/// Make a store which only uses its local storage.
///
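/// A minimal usage sketch (the pool's thread-name prefix is an illustrative assumption):
///
/// ```ignore
/// let pool = Arc::new(ResettablePool::new("io-".to_owned()));
/// let store = Store::local_only(Store::default_path(), pool)?;
/// ```
///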
pub fn local_only<P: AsRef<Path>>(path: P, pool: Arc<ResettablePool>) -> Result<Store, String> {
Ok(Store {
local: local::ByteStore::new(path, pool)?,
remote: None,
})
}
///
/// Make a store which uses local storage, and if it is missing a value which it tries to load,
/// will attempt to back-fill its local storage from a remote CAS.
///
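/// A construction sketch; every argument value below is illustrative (a hypothetical CAS
/// address, arbitrary sizes and timeout), not a recommended default:
///
/// ```ignore
/// let store = Store::with_remote(
///   Store::default_path(),
///   pool,
///   "localhost:7890",        // cas_address (hypothetical)
///   None,                    // instance_name
///   None,                    // root_ca_certs
///   None,                    // oauth_bearer_token
///   1,                       // thread_count
///   1024 * 1024,             // chunk_size_bytes
///   Duration::from_secs(30), // timeout
/// )?;
/// ```
///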
#[cfg_attr(feature = "cargo-clippy", allow(too_many_arguments))]
pub fn with_remote<P: AsRef<Path>>(
path: P,
pool: Arc<ResettablePool>,
cas_address: &str,
instance_name: Option<String>,
root_ca_certs: Option<Vec<u8>>,
oauth_bearer_token: Option<String>,
thread_count: usize,
chunk_size_bytes: usize,
timeout: Duration,
) -> Result<Store, String> {
Ok(Store {
local: local::ByteStore::new(path, pool)?,
remote: Some(remote::ByteStore::new(
cas_address,
instance_name,
root_ca_certs,
oauth_bearer_token,
thread_count,
chunk_size_bytes,
timeout,
)),
})
}
// This default is also hard-coded into the Python options code in global_options.py
pub fn default_path() -> PathBuf {
match dirs::home_dir() {
Some(home_dir) => home_dir.join(".cache").join("pants").join("lmdb_store"),
None => panic!("Could not find home dir"),
}
}
///
/// Store a file locally.
///
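/// A sketch (blocking on the returned future purely for illustration):
///
/// ```ignore
/// let digest = store.store_file_bytes(Bytes::from("hello"), false).wait()?;
/// ```
///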
pub fn store_file_bytes(&self, bytes: Bytes, initial_lease: bool) -> BoxFuture<Digest, String> {
self
.local
.store_bytes(EntryType::File, bytes, initial_lease)
.to_boxed()
}
///
/// Loads the bytes of the file with the passed fingerprint from the local store, back-filling
/// from remote when necessary and possible (i.e. when remote is configured), and returns the
/// result of applying f to that value.
///
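/// A sketch (assumes `digest` refers to a previously stored file):
///
/// ```ignore
/// let maybe_len = store.load_file_bytes_with(digest, |bytes| bytes.len()).wait()?;
/// ```
///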
pub fn load_file_bytes_with<T: Send + 'static, F: Fn(Bytes) -> T + Send + Sync + 'static>(
&self,
digest: Digest,
f: F,
) -> BoxFuture<Option<T>, String> {
// No transformation or verification is needed for files, so we pass in a pair of functions
// which always succeed, whether the underlying bytes are coming from a local or remote store.
// Unfortunately, we need to be a little verbose to do this.
let f_local = Arc::new(f);
let f_remote = f_local.clone();
self.load_bytes_with(
EntryType::File,
digest,
move |v: Bytes| Ok(f_local(v)),
move |v: Bytes| Ok(f_remote(v)),
)
}
///
/// Save the bytes of the Directory proto locally, without regard for any of the
/// contents of any FileNodes or DirectoryNodes therein (i.e. does not require that its
/// children are already stored).
///
pub fn record_directory(
&self,
directory: &bazel_protos::remote_execution::Directory,
initial_lease: bool,
) -> BoxFuture<Digest, String> {
let local = self.local.clone();
future::result(
directory
.write_to_bytes()
.map_err(|e| format!("Error serializing directory proto {:?}: {:?}", directory, e)),
).and_then(move |bytes| {
local.store_bytes(EntryType::Directory, Bytes::from(bytes), initial_lease)
}).to_boxed()
}
///
/// Loads a directory proto from the local store, back-filling from remote if necessary.
///
/// Guarantees that if an Ok Some value is returned, it is valid, and canonical, and its
/// fingerprint exactly matches that which is requested. Will return an Err if it would return a
/// non-canonical Directory.
///
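/// A sketch (assumes `dir_digest` was returned by `record_directory`):
///
/// ```ignore
/// let maybe_directory = store.load_directory(dir_digest).wait()?;
/// ```
///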
pub fn load_directory(
&self,
digest: Digest,
) -> BoxFuture<Option<bazel_protos::remote_execution::Directory>, String> {
self.load_bytes_with(
EntryType::Directory,
digest,
// Trust that locally stored values were canonical when they were written into the CAS;
// don't bother to re-check this, as it's slightly expensive.
move |bytes: Bytes| {
let mut directory = bazel_protos::remote_execution::Directory::new();
directory.merge_from_bytes(&bytes).map_err(|e| {
format!(
"LMDB corruption: Directory bytes for {:?} were not valid: {:?}",
digest, e
)
})?;
Ok(directory)
},
// Eagerly verify that CAS-returned Directories are canonical, so that we never write
// a non-canonical Directory into our local store.
move |bytes: Bytes| {
let mut directory = bazel_protos::remote_execution::Directory::new();
directory.merge_from_bytes(&bytes).map_err(|e| {
format!(
"CAS returned Directory proto for {:?} which was not valid: {:?}",
digest, e
)
})?;
bazel_protos::verify_directory_canonical(&directory)?;
Ok(directory)
},
)
}
///
/// Loads bytes from the remote CAS if required and possible (i.e. if remote is configured). Takes
/// two functions, f_local and f_remote, which perform any validation or transformation you want
/// to apply to the bytes received from the local and remote CAS respectively.
///
fn load_bytes_with<
T: Send + 'static,
FLocal: Fn(Bytes) -> Result<T, String> + Send + Sync + 'static,
FRemote: Fn(Bytes) -> Result<T, String> + Send + Sync + 'static,
>(
&self,
entry_type: EntryType,
digest: Digest,
f_local: FLocal,
f_remote: FRemote,
) -> BoxFuture<Option<T>, String> {
let local = self.local.clone();
let maybe_remote = self.remote.clone();
self
.local
.load_bytes_with(entry_type, digest, f_local)
.and_then(
move |maybe_local_value| match (maybe_local_value, maybe_remote) {
(Some(value_result), _) => future::done(value_result.map(Some)).to_boxed(),
(None, None) => future::ok(None).to_boxed(),
(None, Some(remote)) => remote
.load_bytes_with(entry_type, digest, move |bytes: Bytes| bytes)
.and_then(move |maybe_bytes: Option<Bytes>| match maybe_bytes {
Some(bytes) => future::done(f_remote(bytes.clone()))
.and_then(move |value| {
local
.store_bytes(entry_type, bytes, true)
.and_then(move |stored_digest| {
if digest == stored_digest {
Ok(Some(value))
} else {
Err(format!(
"CAS gave wrong digest: expected {:?}, got {:?}",
digest, stored_digest
))
}
})
}).to_boxed(),
None => future::ok(None).to_boxed(),
}).to_boxed(),
},
).to_boxed()
}
///
/// Ensures that the remote ByteStore has a copy of each passed Digest, including any files
/// contained in any Directories in the list.
///
/// Returns a structure with the summary of operations.
///
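/// A sketch (assumes this Store was constructed with_remote, and that the digests are
/// already present locally):
///
/// ```ignore
/// let summary = store.ensure_remote_has_recursive(vec![file_digest, dir_digest]).wait()?;
/// ```
///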
pub fn ensure_remote_has_recursive(
&self,
digests: Vec<Digest>,
) -> BoxFuture<UploadSummary, String> {
let remote = match self.remote {
Some(ref remote) => remote,
None => {
return future::err("Cannot ensure remote has blobs without a remote".to_owned()).to_boxed()
}
};
let mut expanding_futures = Vec::new();
let mut expanded_digests = HashMap::new();
for digest in digests {
match self.local.entry_type(&digest.0) {
Ok(Some(EntryType::File)) => {
expanded_digests.insert(digest, EntryType::File);
}
Ok(Some(EntryType::Directory)) => {
expanding_futures.push(self.expand_directory(digest));
}
Ok(None) => {
return future::err(format!("Failed to upload digest {:?}: Not found", digest)).to_boxed()
}
Err(err) => {
return future::err(format!("Failed to upload digest {:?}: {:?}", digest, err)).to_boxed()
}
};
}
let local = self.local.clone();
let remote = remote.clone();
let remote2 = remote.clone();
future::join_all(expanding_futures)
.map(move |futures| {
for mut digests in futures {
for (digest, entry_type) in digests.drain() {
expanded_digests.insert(digest, entry_type);
}
}
expanded_digests
}).and_then(move |ingested_digests| {
if Store::upload_is_faster_than_checking_whether_to_upload(&ingested_digests) {
return Ok((ingested_digests.keys().cloned().collect(), ingested_digests));
}
remote
.list_missing_digests(ingested_digests.keys())
.map(|digests_to_upload| (digests_to_upload, ingested_digests))
}).and_then(move |(digests_to_upload, ingested_digests)| {
future::join_all(
digests_to_upload
.into_iter()
.map(|digest| {
let entry_type = ingested_digests[&digest];
let remote = remote2.clone();
local
.load_bytes_with(entry_type, digest, move |bytes| remote.store_bytes(bytes))
.and_then(move |maybe_future| match maybe_future {
Some(future) => Ok(future),
None => Err(format!("Failed to upload digest {:?}: Not found", digest)),
})
}).collect::<Vec<_>>(),
).and_then(future::join_all)
.map(|uploaded_digests| (uploaded_digests, ingested_digests))
}).map(|(uploaded_digests, ingested_digests)| {
let ingested_file_sizes = ingested_digests.iter().map(|(digest, _)| digest.1);
let uploaded_file_sizes = uploaded_digests.iter().map(|digest| digest.1);
UploadSummary {
ingested_file_count: ingested_file_sizes.len(),
ingested_file_bytes: ingested_file_sizes.sum(),
uploaded_file_count: uploaded_file_sizes.len(),
uploaded_file_bytes: uploaded_file_sizes.sum(),
}
}).to_boxed()
}
///
/// Recursively downloads a directory from the remote ByteStore to the local one. Must only be
/// called with the Digest of a Directory.
///
pub fn ensure_local_has_recursive_directory(&self, dir_digest: Digest) -> BoxFuture<(), String> {
let store = self.clone();
self
.load_directory(dir_digest)
.and_then(move |directory_opt| {
directory_opt.ok_or_else(|| format!("Could not read dir with digest {:?}", dir_digest))
}).and_then(move |directory| {
// Traverse the files within directory
let file_futures = directory
.get_files()
.iter()
.map(|file_node| {
let file_digest = try_future!(file_node.get_digest().into());
store.load_bytes_with(EntryType::File, file_digest, |_| Ok(()), |_| Ok(()))
}).collect::<Vec<_>>();
// Recursively call with sub-directories
let directory_futures = directory
.get_directories()
.iter()
.map(move |child_dir| {
let child_digest = try_future!(child_dir.get_digest().into());
store.ensure_local_has_recursive_directory(child_digest)
}).collect::<Vec<_>>();
future::join_all(file_futures)
.join(future::join_all(directory_futures))
.map(|_| ())
}).to_boxed()
}
pub fn lease_all<'a, Ds: Iterator<Item = &'a Digest>>(&self, digests: Ds) -> Result<(), String> {
self.local.lease_all(digests)
}
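///
/// Attempts to shrink the local store to at most target_size_bytes, erroring if it cannot
/// shrink that far. A usage sketch, using the default GC target defined above:
///
/// ```ignore
/// store.garbage_collect(DEFAULT_LOCAL_STORE_GC_TARGET_BYTES, ShrinkBehavior::Fast)?;
/// ```
///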
pub fn garbage_collect(
&self,
target_size_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<(), String> {
match self.local.shrink(target_size_bytes, shrink_behavior) {
Ok(size) => {
if size > target_size_bytes {
Err(format!(
"Garbage collection attempted to target {} bytes but could only shrink to {} bytes",
target_size_bytes, size
))
} else {
Ok(())
}
}
Err(err) => Err(format!("Garbage collection failed: {:?}", err)),
}
}
///
/// Returns whether it is likely to be faster to upload all of the digests blindly than to first
/// check which of them the remote is missing.
///
/// The threshold values are guesses; feel free to tweak them.
///
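/// For example, with the current thresholds, two files totalling 200 KiB are uploaded without
/// checking, while three or more files, or 1 MiB or more of data, are first checked against
/// the remote.
///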
fn upload_is_faster_than_checking_whether_to_upload(
digests: &HashMap<Digest, EntryType>,
) -> bool {
if digests.len() < 3 {
let mut num_bytes = 0;
for digest in digests.keys() {
num_bytes += digest.1;
}
num_bytes < 1024 * 1024
} else {
false
}
}
pub fn expand_directory(&self, digest: Digest) -> BoxFuture<HashMap<Digest, EntryType>, String> {
let accumulator = Arc::new(Mutex::new(HashMap::new()));
self
.expand_directory_helper(digest, accumulator.clone())
.map(|()| {
Arc::try_unwrap(accumulator)
.expect("Arc should have been unwrappable")
.into_inner()
}).to_boxed()
}
fn expand_directory_helper(
&self,
digest: Digest,
accumulator: Arc<Mutex<HashMap<Digest, EntryType>>>,
) -> BoxFuture<(), String> {
let store = self.clone();
self
.load_directory(digest)
.and_then(move |maybe_directory| match maybe_directory {
Some(directory) => {
{
let mut accumulator = accumulator.lock();
accumulator.insert(digest, EntryType::Directory);
for file in directory.get_files() {
accumulator.insert(try_future!(file.get_digest().into()), EntryType::File);
}
}
future::join_all(
directory
.get_directories()
.into_iter()
.map(move |subdir| {
store.clone().expand_directory_helper(
try_future!(subdir.get_digest().into()),
accumulator.clone(),
)
}).collect::<Vec<_>>(),
).map(|_| ())
.to_boxed()
}
None => future::err(format!("Could not expand unknown directory: {:?}", digest)).to_boxed(),
}).to_boxed()
}
///
/// Lays out the directory and all of its contents (files and directories) on disk so that a
/// process which uses the directory structure can run.
///
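/// A sketch (the destination path is illustrative; it and its parent directories are created
/// as needed):
///
/// ```ignore
/// store.materialize_directory(PathBuf::from("/tmp/sandbox"), dir_digest).wait()?;
/// ```
///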
pub fn materialize_directory(
&self,
destination: PathBuf,
digest: Digest,
) -> BoxFuture<(), String> {
try_future!(super::safe_create_dir_all(&destination));
let store = self.clone();
self
.load_directory(digest)
.and_then(move |directory_opt| {
directory_opt.ok_or_else(|| format!("Directory with digest {:?} not found", digest))
}).and_then(move |directory| {
let file_futures = directory
.get_files()
.iter()
.map(|file_node| {
let store = store.clone();
let path = destination.join(file_node.get_name());
let digest = try_future!(file_node.get_digest().into());
store.materialize_file(path, digest, file_node.is_executable)
}).collect::<Vec<_>>();
let directory_futures = directory
.get_directories()
.iter()
.map(|directory_node| {
let store = store.clone();
let path = destination.join(directory_node.get_name());
let digest = try_future!(directory_node.get_digest().into());
store.materialize_directory(path, digest)
}).collect::<Vec<_>>();
future::join_all(file_futures)
.join(future::join_all(directory_futures))
.map(|_| ())
}).to_boxed()
}
fn materialize_file(
&self,
destination: PathBuf,
digest: Digest,
is_executable: bool,
) -> BoxFuture<(), String> {
self
.load_file_bytes_with(digest, move |bytes| {
OpenOptions::new()
.create(true)
.write(true)
.mode(if is_executable { 0o755 } else { 0o644 })
.open(&destination)
.and_then(|mut f| f.write_all(&bytes))
.map_err(|e| format!("Error writing file {:?}: {:?}", destination, e))
}).and_then(move |write_result| match write_result {
Some(Ok(())) => Ok(()),
Some(Err(e)) => Err(e),
None => Err(format!("File with digest {:?} not found", digest)),
}).to_boxed()
}
// Returns files sorted by their path.
pub fn contents_for_directory(
&self,
directory: &bazel_protos::remote_execution::Directory,
) -> BoxFuture<Vec<FileContent>, String> {
let accumulator = Arc::new(Mutex::new(HashMap::new()));
self
.contents_for_directory_helper(directory, PathBuf::new(), accumulator.clone())
.map(|()| {
let map = Arc::try_unwrap(accumulator).unwrap().into_inner();
let mut vec: Vec<FileContent> = map
.into_iter()
.map(|(path, content)| FileContent { path, content })
.collect();
vec.sort_by(|l, r| l.path.cmp(&r.path));
vec
}).to_boxed()
}
// Assumes that all fingerprints it encounters are valid.
fn contents_for_directory_helper(
&self,
directory: &bazel_protos::remote_execution::Directory,
path_so_far: PathBuf,
contents_wrapped: Arc<Mutex<HashMap<PathBuf, Bytes>>>,
) -> BoxFuture<(), String> {
let contents_wrapped_copy = contents_wrapped.clone();
let path_so_far_copy = path_so_far.clone();
let store_copy = self.clone();
let file_futures = future::join_all(
directory
.get_files()
.iter()
.map(move |file_node| {
let path = path_so_far_copy.join(file_node.get_name());
let contents_wrapped_copy = contents_wrapped_copy.clone();
store_copy
.load_file_bytes_with(try_future!(file_node.get_digest().into()), |b| b)
.and_then(move |maybe_bytes| {
maybe_bytes
.ok_or_else(|| format!("Couldn't find file contents for {:?}", path))
.map(move |bytes| {
let mut contents = contents_wrapped_copy.lock();
contents.insert(path, bytes);
})
}).to_boxed()
}).collect::<Vec<_>>(),
);
let store = self.clone();
let dir_futures = future::join_all(
directory
.get_directories()
.into_iter()
.map(move |dir_node| {
let digest = try_future!(dir_node.get_digest().into());
let path = path_so_far.join(dir_node.get_name());
let store = store.clone();
let contents_wrapped = contents_wrapped.clone();
store
.load_directory(digest)
.and_then(move |maybe_dir| {
maybe_dir
.ok_or_else(|| format!("Could not find sub-directory with digest {:?}", digest))
})
.and_then(move |dir| store.contents_for_directory_helper(&dir, path, contents_wrapped))
.to_boxed()
}).collect::<Vec<_>>(),
);
file_futures.join(dir_futures).map(|(_, _)| ()).to_boxed()
}
}
// Only public for testing.
#[derive(Copy, Clone, Debug, Eq, Hash, PartialEq, Ord, PartialOrd)]
pub enum EntryType {
Directory,
File,
}
mod local {
use super::{EntryType, ShrinkBehavior};
use boxfuture::{BoxFuture, Boxable};
use byteorder::{ByteOrder, LittleEndian};
use bytes::Bytes;
use digest::{Digest as DigestTrait, FixedOutput};
use futures::future;
use hashing::{Digest, Fingerprint};
use lmdb::Error::{KeyExist, NotFound};
use lmdb::{
self, Cursor, Database, DatabaseFlags, Environment, EnvironmentCopyFlags, EnvironmentFlags,
RwTransaction, Transaction, WriteFlags,
};
use log::{debug, error};
use sha2::Sha256;
use std;
use std::collections::{BinaryHeap, HashMap};
use std::fmt;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time;
use tempfile::TempDir;
use super::super::EMPTY_DIGEST;
use super::MAX_LOCAL_STORE_SIZE_BYTES;
use pool::ResettablePool;
#[derive(Clone)]
pub struct ByteStore {
inner: Arc<InnerStore>,
}
struct InnerStore {
pool: Arc<ResettablePool>,
// Store directories separately from files because:
// 1. They may have different lifetimes.
// 2. It's nice to know whether we should be able to parse something as a proto.
file_dbs: Result<Arc<ShardedLmdb>, String>,
directory_dbs: Result<Arc<ShardedLmdb>, String>,
}
impl ByteStore {
pub fn new<P: AsRef<Path>>(path: P, pool: Arc<ResettablePool>) -> Result<ByteStore, String> {
let root = path.as_ref();
let files_root = root.join("files");
let directories_root = root.join("directories");
Ok(ByteStore {
inner: Arc::new(InnerStore {
pool,
file_dbs: ShardedLmdb::new(files_root.clone()).map(Arc::new),
directory_dbs: ShardedLmdb::new(directories_root.clone()).map(Arc::new),
}),
})
}
// Note: This performs IO on the calling thread. Hopefully the IO is small enough not to matter.
pub fn entry_type(&self, fingerprint: &Fingerprint) -> Result<Option<EntryType>, String> {
{
let (env, directory_database, _) = self.inner.directory_dbs.clone()?.get(fingerprint);
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Failed to begin read transaction: {:?}", err))?;
match txn.get(directory_database, &fingerprint.as_ref()) {
Ok(_) => return Ok(Some(EntryType::Directory)),
Err(NotFound) => {}
Err(err) => {
return Err(format!(
"Error reading from store when determining type of fingerprint {}: {}",
fingerprint, err
))
}
};
}
let (env, file_database, _) = self.inner.file_dbs.clone()?.get(fingerprint);
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Failed to begin read transaction: {}", err))?;
match txn.get(file_database, &fingerprint.as_ref()) {
Ok(_) => return Ok(Some(EntryType::File)),
Err(NotFound) => {}
Err(err) => {
return Err(format!(
"Error reading from store when determining type of fingerprint {}: {}",
fingerprint, err
))
}
};
Ok(None)
}
pub fn lease_all<'a, Ds: Iterator<Item = &'a Digest>>(
&self,
digests: Ds,
) -> Result<(), String> {
let until = Self::default_lease_until_secs_since_epoch();
for digest in digests {
let (env, _, lease_database) = self.inner.file_dbs.clone()?.get(&digest.0);
env
.begin_rw_txn()
.and_then(|mut txn| self.lease(lease_database, &digest.0, until, &mut txn))
.map_err(|err| format!("Error leasing digest {:?}: {}", digest, err))?;
}
Ok(())
}
fn default_lease_until_secs_since_epoch() -> u64 {
let now_since_epoch = time::SystemTime::now()
.duration_since(time::UNIX_EPOCH)
.expect("Surely you're not before the unix epoch?");
(now_since_epoch + time::Duration::from_secs(2 * 60 * 60)).as_secs()
}
fn lease(
&self,
database: Database,
fingerprint: &Fingerprint,
until_secs_since_epoch: u64,
txn: &mut RwTransaction,
) -> Result<(), lmdb::Error> {
let mut buf = [0; 8];
LittleEndian::write_u64(&mut buf, until_secs_since_epoch);
txn.put(database, &fingerprint.as_ref(), &buf, WriteFlags::empty())
}
///
/// Attempts to shrink the stored files to be no bigger than target_bytes
/// (excluding lmdb overhead).
///
/// Returns the size it was shrunk to, which may be larger than target_bytes.
///
/// Ignores directories. TODO: Shrink directories.
///
/// TODO: Use LMDB database statistics when lmdb-rs exposes them.
///
pub fn shrink(
&self,
target_bytes: usize,
shrink_behavior: ShrinkBehavior,
) -> Result<usize, String> {
let mut used_bytes: usize = 0;
let mut fingerprints_by_expired_ago = BinaryHeap::new();
self.aged_fingerprints(
EntryType::File,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
self.aged_fingerprints(
EntryType::Directory,
&mut used_bytes,
&mut fingerprints_by_expired_ago,
)?;
while used_bytes > target_bytes {
let aged_fingerprint = fingerprints_by_expired_ago
.pop()
.expect("lmdb corruption detected, sum of size of blobs exceeded stored blobs");
if aged_fingerprint.expired_seconds_ago == 0 {
// Ran out of expired blobs - everything remaining is leased and cannot be collected.
return Ok(used_bytes);
}
let lmdbs = match aged_fingerprint.entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
let (env, database, lease_database) = lmdbs.clone()?.get(&aged_fingerprint.fingerprint);
{
env
.begin_rw_txn()
.and_then(|mut txn| {
txn.del(database, &aged_fingerprint.fingerprint.as_ref(), None)?;
txn
.del(lease_database, &aged_fingerprint.fingerprint.as_ref(), None)
.or_else(|err| match err {
NotFound => Ok(()),
err => Err(err),
})?;
used_bytes -= aged_fingerprint.size_bytes;
txn.commit()
}).map_err(|err| format!("Error garbage collecting: {}", err))?;
}
}
if shrink_behavior == ShrinkBehavior::Compact {
self.inner.file_dbs.clone()?.compact()?;
}
Ok(used_bytes)
}
fn aged_fingerprints(
&self,
entry_type: EntryType,
used_bytes: &mut usize,
fingerprints_by_expired_ago: &mut BinaryHeap<AgedFingerprint>,
) -> Result<(), String> {
let database = match entry_type {
EntryType::File => self.inner.file_dbs.clone(),
EntryType::Directory => self.inner.directory_dbs.clone(),
};
for &(ref env, ref database, ref lease_database) in &database?.all_lmdbs() {
let txn = env
.begin_ro_txn()
.map_err(|err| format!("Error beginning transaction to garbage collect: {}", err))?;
let mut cursor = txn
.open_ro_cursor(*database)
.map_err(|err| format!("Failed to open lmdb read cursor: {}", err))?;
for (key, bytes) in cursor.iter() {
*used_bytes += bytes.len();
// Random access into the lease_database is slower than iterating, but hopefully garbage
// collection is rare enough that we can get away with this, rather than do two passes
// here (either to populate leases into pre-populated AgedFingerprints, or to read sizes
// when we delete from lmdb to track how much we've freed).
let lease_until_unix_timestamp = txn
.get(*lease_database, &key)
.map(|b| LittleEndian::read_u64(b))
.unwrap_or_else(|e| match e {
NotFound => 0,
e => panic!("Error reading lease, probable lmdb corruption: {:?}", e),
});
let leased_until =
time::UNIX_EPOCH + time::Duration::from_secs(lease_until_unix_timestamp);
let expired_seconds_ago = time::SystemTime::now()
.duration_since(leased_until)
.map(|t| t.as_secs())
// 0 indicates unleased.
.unwrap_or(0);
fingerprints_by_expired_ago.push(AgedFingerprint {
expired_seconds_ago,
fingerprint: Fingerprint::from_bytes_unsafe(key),
size_bytes: bytes.len(),
entry_type,
});
}
}
Ok(())
}
pub fn store_bytes(
&self,
entry_type: EntryType,
bytes: Bytes,
initial_lease: bool,
) -> BoxFuture<Digest, String> {
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
let bytestore = self.clone();
self
.inner
.pool
.spawn_fn(move || {
let fingerprint = {
let mut hasher = Sha256::default();
hasher.input(&bytes);
Fingerprint::from_bytes_unsafe(hasher.fixed_result().as_slice())
};
let digest = Digest(fingerprint, bytes.len());
let (env, content_database, lease_database) = dbs.clone()?.get(&fingerprint);
let put_res = env.begin_rw_txn().and_then(|mut txn| {
txn.put(
content_database,
&fingerprint,
&bytes,
WriteFlags::NO_OVERWRITE,
)?;
if initial_lease {
bytestore.lease(
lease_database,
&fingerprint,
Self::default_lease_until_secs_since_epoch(),
&mut txn,
)?;
}
txn.commit()
});
match put_res {
Ok(()) => Ok(digest),
Err(KeyExist) => Ok(digest),
Err(err) => Err(format!("Error storing digest {:?}: {}", digest, err)),
}
}).to_boxed()
}
pub fn load_bytes_with<T: Send + 'static, F: Fn(Bytes) -> T + Send + Sync + 'static>(
&self,
entry_type: EntryType,
digest: Digest,
f: F,
) -> BoxFuture<Option<T>, String> {
if digest == EMPTY_DIGEST {
// Avoid expensive I/O for this super common case.
// Also, this allows some client-provided operations (like merging snapshots) to work
// without needing to first store the empty snapshot.
return future::ok(Some(f(Bytes::new()))).to_boxed();
}
let dbs = match entry_type {
EntryType::Directory => self.inner.directory_dbs.clone(),
EntryType::File => self.inner.file_dbs.clone(),
};
self
.inner
.pool
.spawn_fn(move || {
let (env, db, _) = dbs.clone()?.get(&digest.0);
let ro_txn = env
.begin_ro_txn()
.map_err(|err| format!("Failed to begin read transaction: {}", err));
ro_txn.and_then(|txn| match txn.get(db, &digest.0) {
Ok(bytes) => {
if bytes.len() == digest.1 {
Ok(Some(f(Bytes::from(bytes))))
} else {
error!("Got hash collision reading from store - digest {:?} was requested, but retrieved bytes with that fingerprint had length {}. Congratulations, you may have broken sha256! Underlying bytes: {:?}", digest, bytes.len(), bytes);
Ok(None)
}
}
Err(NotFound) => Ok(None),
Err(err) => Err(format!("Error loading digest {:?}: {}", digest, err,)),
})
}).to_boxed()
}
}
// Each LMDB directory can have at most one concurrent writer.
// We use this type to shard storage into 16 LMDB directories, based on the first 4 bits of the
// fingerprint being stored, so that we can write to them in parallel.
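// For example, all fingerprints sharing the same leading 4 bits (every fingerprint beginning
// 0xA..., say) land in the same shard, so writes to up to 16 distinct shards can proceed in
// parallel.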
#[derive(Clone)]
struct ShardedLmdb {
// First Database is content, second is leases.
lmdbs: HashMap<u8, (Arc<Environment>, Database, Database)>,
root_path: PathBuf,
}
impl ShardedLmdb {
pub fn new(root_path: PathBuf) -> Result<ShardedLmdb, String> {
debug!("Initializing ShardedLmdb at root {:?}", root_path);
let mut lmdbs = HashMap::new();
for (env, dir, fingerprint_prefix) in ShardedLmdb::envs(&root_path)? {
debug!("Making ShardedLmdb content database for {:?}", dir);
let content_database = env
.create_db(Some("content"), DatabaseFlags::empty())
.map_err(|e| {
format!(
"Error creating/opening content database at {:?}: {}",
dir, e
)
})?;
debug!("Making ShardedLmdb lease database for {:?}", dir);
let lease_database = env
.create_db(Some("leases"), DatabaseFlags::empty())
.map_err(|e| {
format!(
"Error creating/opening content database at {:?}: {}",
dir, e
)
})?;
lmdbs.insert(
fingerprint_prefix,
(Arc::new(env), content_database, lease_database),
);
}