pub mod hash;
#[cfg(test)]
mod tests {
mod compatibility;
mod computation;
}
use super::CheckpointError;
use crate::{
manifest::hash::{meta_manifest_hasher, sub_manifest_hasher},
BundledManifest, DirtyPages, FileType, ManifestMetrics, CRITICAL_ERROR_REUSED_CHUNK_HASH,
LABEL_VALUE_HASHED, LABEL_VALUE_HASHED_AND_COMPARED, LABEL_VALUE_REUSED,
};
use bit_vec::BitVec;
use hash::{chunk_hasher, file_hasher, manifest_hasher, ManifestHash};
use ic_crypto_sha::Sha256;
use ic_logger::{error, fatal, warn, ReplicaLogger};
use ic_replicated_state::PageIndex;
use ic_state_layout::{CheckpointLayout, ReadOnly};
use ic_sys::{mmap::ScopedMmap, PAGE_SIZE};
use ic_types::{
crypto::CryptoHash,
state_sync::{
encode_manifest, ChunkInfo, FileGroupChunks, FileInfo, Manifest, MetaManifest,
FILE_GROUP_CHUNK_ID_OFFSET,
},
CryptoHashOfState, Height,
};
use rand::{Rng, SeedableRng};
use rand_chacha::ChaChaRng;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::fmt;
use std::ops::Range;
use std::path::{Path, PathBuf};
use std::sync::{Arc, Mutex, Weak};
pub use ic_types::state_sync::DEFAULT_CHUNK_SIZE;
/// Initial version.
pub const STATE_SYNC_V1: u32 = 1;
/// Compute the manifest hash based on the encoded manifest.
pub const STATE_SYNC_V2: u32 = 2;
/// The version of the StateSync protocol that should be used for all newly created manifests.
pub const CURRENT_STATE_SYNC_VERSION: u32 = STATE_SYNC_V2;
/// Maximum supported StateSync version.
///
/// The replica will panic if it has to deal with a manifest whose version is higher than this.
pub const MAX_SUPPORTED_STATE_SYNC_VERSION: u32 = STATE_SYNC_V2;
/// When computing a manifest, we recompute the hash of every
/// `REHASH_EVERY_NTH_CHUNK` chunk, even if we know it to be unchanged and
/// have a hash computed earlier by this replica process.
const REHASH_EVERY_NTH_CHUNK: u64 = 10;
/// During the downloading phase of state sync, we group together certain files
/// whose filenames end with `FILE_TO_GROUP`.
///
/// We make the decision to group `canister.pbuf` files for two main reasons:
/// 1. They are small in general, usually less than 1 KiB.
/// 2. They change between checkpoints, so we always have to fetch them.
const FILE_TO_GROUP: &str = "canister.pbuf";
/// The size of the files to group should be less than or equal to
/// `MAX_FILE_SIZE_TO_GROUP` to guarantee the efficiency of grouping.
///
/// The number is chosen heuristically for two reasons:
/// 1. It will cover most of the `canister.pbuf` files, if not all of them.
/// 2. `DEFAULT_CHUNK_SIZE` is 128 times larger, so grouping reduces the number
///    of chunks by at least two orders of magnitude, which is significant enough.
const MAX_FILE_SIZE_TO_GROUP: u32 = 1 << 13; // 8 KiB
#[derive(Debug, PartialEq, Eq)]
pub enum ManifestValidationError {
InvalidRootHash {
expected_hash: Vec<u8>,
actual_hash: Vec<u8>,
},
InvalidFileHash {
relative_path: PathBuf,
expected_hash: Vec<u8>,
actual_hash: Vec<u8>,
},
UnsupportedManifestVersion {
manifest_version: u32,
max_supported_version: u32,
},
}
impl fmt::Display for ManifestValidationError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::InvalidRootHash {
expected_hash,
actual_hash,
} => write!(
f,
"manifest root hash mismatch, expected {}, got {}",
hex::encode(&expected_hash[..]),
hex::encode(&actual_hash[..])
),
Self::InvalidFileHash {
relative_path,
expected_hash,
actual_hash,
} => write!(
f,
"file {} hash mismatch, expected {}, got {}",
relative_path.display(),
hex::encode(&expected_hash[..]),
hex::encode(&actual_hash[..])
),
Self::UnsupportedManifestVersion {
manifest_version,
max_supported_version,
} => write!(
f,
"manifest version {} not supported, maximum supported version {}",
manifest_version, max_supported_version,
),
}
}
}
impl std::error::Error for ManifestValidationError {}
#[derive(Debug, PartialEq, Eq)]
pub enum ChunkValidationError {
InvalidChunkHash {
chunk_ix: usize,
expected_hash: Vec<u8>,
actual_hash: Vec<u8>,
},
InvalidChunkSize {
chunk_ix: usize,
expected_size: usize,
actual_size: usize,
},
InvalidChunkIndex {
chunk_ix: usize,
actual_length: usize,
},
}
impl fmt::Display for ChunkValidationError {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
Self::InvalidChunkHash {
chunk_ix,
expected_hash,
actual_hash,
} => write!(
f,
"chunk {} hash mismatch, expected {}, got {}",
chunk_ix,
hex::encode(&expected_hash[..]),
hex::encode(&actual_hash[..])
),
Self::InvalidChunkSize {
chunk_ix,
expected_size,
actual_size,
} => write!(
f,
"chunk {} size mismatch, expected {}, got {}",
chunk_ix, expected_size, actual_size
),
Self::InvalidChunkIndex {
chunk_ix,
actual_length,
} => write!(
f,
"chunk index {} is out of the vector length {}",
chunk_ix, actual_length
),
}
}
}
impl std::error::Error for ChunkValidationError {}
/// Relative path to a file and the size of the file.
#[derive(Clone)]
struct FileWithSize(PathBuf, u64);
#[derive(Debug, Clone, PartialEq, Eq)]
enum ChunkAction {
/// Recompute the hash of the chunk, as no previously computed hash is
/// available
Recompute,
/// There is a previously computed hash for this chunk, but recompute it
/// anyway and record an error metric if there is a mismatch
RecomputeAndCompare([u8; 32]),
/// Use the previously computed hash for this chunk
UseHash([u8; 32]),
}
/// An index into some table in the _new_ manifest file.
pub type NewIndex = usize;
/// An index into some table in the _old_ manifest file.
pub type OldIndex = usize;
/// A script describing how to turn an old state into a new state.
#[derive(Debug, PartialEq, Eq)]
pub struct DiffScript {
/// Copy some files from the old state.
/// Keys are indices of the file table in the new manifest file,
/// values are indices of the file table in the old manifest file.
pub(crate) copy_files: HashMap<NewIndex, OldIndex>,
/// Re-use existing chunks from the old state.
/// Chunks that belong to the `copy_files` key space are excluded.
/// Keys are indices of the chunk table in the new manifest file,
/// values are indices of the chunk table in the old manifest file.
pub(crate) copy_chunks: HashMap<NewIndex, OldIndex>,
/// Fetch this set of chunks from the peers and apply them.
pub(crate) fetch_chunks: HashSet<NewIndex>,
/// Number of all-zero chunks used for metrics.
pub(crate) zeros_chunks: u32,
}
/// ManifestDelta contains a manifest of an old state and indices of all the
/// memory pages that changed (became "dirty") since that state.
///
/// This data allows us to speed up manifest computation: we can map dirty page
/// indices back to chunks and avoid re-computing chunks that haven't changed
/// since the previous manifest computation.
pub struct ManifestDelta {
/// Manifest of the state at `base_height`.
pub(crate) base_manifest: Manifest,
/// Height of the base state.
pub(crate) base_height: Height,
/// Height of the new state for which the manifest is computed.
pub(crate) target_height: Height,
/// Wasm memory and stable memory pages that might have changed since the
/// state at `base_height`.
pub(crate) dirty_memory_pages: DirtyPages,
}
/// Groups small files into larger chunks.
///
/// Builds the grouping of how files should be put together into a single chunk and
/// returns the mapping from chunk id to the grouped chunk indices.
/// The grouping is deterministic, which ensures that the sender assembles each
/// grouped chunk in such a way that the receiver can split it back just by
/// looking at the manifest.
pub(crate) fn build_file_group_chunks(manifest: &Manifest) -> FileGroupChunks {
let mut file_group_chunks: BTreeMap<u32, Vec<u32>> = BTreeMap::new();
if manifest.chunk_table.len() >= FILE_GROUP_CHUNK_ID_OFFSET as usize {
return FileGroupChunks::new(file_group_chunks);
}
let mut chunk_id_p2p = FILE_GROUP_CHUNK_ID_OFFSET;
let mut chunk_table_indices: Vec<u32> = Vec::new();
let mut bytes_left = DEFAULT_CHUNK_SIZE as u64;
for (file_index, f) in manifest.file_table.iter().enumerate() {
if !f.relative_path.ends_with(FILE_TO_GROUP)
|| f.size_bytes > MAX_FILE_SIZE_TO_GROUP as u64
|| f.size_bytes >= DEFAULT_CHUNK_SIZE as u64
{
continue;
}
if bytes_left < f.size_bytes {
file_group_chunks.insert(chunk_id_p2p, std::mem::take(&mut chunk_table_indices));
chunk_id_p2p += 1;
bytes_left = DEFAULT_CHUNK_SIZE as u64;
}
bytes_left -= f.size_bytes;
let chunk_range = file_chunk_range(&manifest.chunk_table, file_index);
chunk_table_indices.extend(chunk_range.map(|i| i as u32));
}
if !chunk_table_indices.is_empty() {
file_group_chunks.insert(chunk_id_p2p, chunk_table_indices);
}
FileGroupChunks::new(file_group_chunks)
}
fn write_chunk_hash(hasher: &mut Sha256, chunk_info: &ChunkInfo) {
chunk_info.file_index.update_hash(hasher);
chunk_info.size_bytes.update_hash(hasher);
chunk_info.offset.update_hash(hasher);
chunk_info.hash.update_hash(hasher);
}
/// Returns the number of chunks of size `max_chunk_size` required to cover a
/// file of size `size_bytes`.
fn count_chunks(size_bytes: u64, max_chunk_size: u32) -> usize {
(size_bytes as usize + max_chunk_size as usize - 1) / max_chunk_size as usize
}
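// A quick sanity check of the ceiling division above. This is an illustrative
// test added for exposition; it is not part of the original module.
#[cfg(test)]
mod count_chunks_example {
    use super::count_chunks;

    #[test]
    fn rounds_up_to_cover_the_tail() {
        let max_chunk_size: u32 = 1 << 20; // 1 MiB, the production chunk size
        // An empty file needs no chunks at all.
        assert_eq!(count_chunks(0, max_chunk_size), 0);
        // An exact multiple of the chunk size needs no extra chunk.
        assert_eq!(count_chunks(max_chunk_size as u64, max_chunk_size), 1);
        // One extra byte spills into a short trailing chunk.
        assert_eq!(count_chunks(max_chunk_size as u64 + 1, max_chunk_size), 2);
    }
}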
/// Checks whether the manifest was computed using the specified `max_chunk_size`.
fn uses_chunk_size(manifest: &Manifest, max_chunk_size: u32) -> bool {
manifest.chunk_table.iter().all(|chunk| {
chunk.size_bytes == max_chunk_size
|| chunk.size_bytes as u64 + chunk.offset
== manifest.file_table[chunk.file_index as usize].size_bytes
})
}
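// An illustrative test of the invariant checked above (not part of the
// original module): every chunk must either be exactly `max_chunk_size` long
// or end exactly at its file's boundary. The struct literals rely on the
// `FileInfo`/`ChunkInfo` fields and `Manifest::new` used elsewhere in this
// file.
#[cfg(test)]
mod uses_chunk_size_example {
    use super::*;

    #[test]
    fn accepts_only_a_short_trailing_chunk() {
        let file_table = vec![FileInfo {
            relative_path: "canister.pbuf".into(),
            size_bytes: (1 << 20) + 100,
            hash: [0; 32],
        }];
        let chunk_table = vec![
            ChunkInfo {
                file_index: 0,
                size_bytes: 1 << 20,
                offset: 0,
                hash: [0; 32],
            },
            ChunkInfo {
                file_index: 0,
                size_bytes: 100,
                offset: 1 << 20,
                hash: [0; 32],
            },
        ];
        let manifest = Manifest::new(CURRENT_STATE_SYNC_VERSION, file_table, chunk_table);
        // With 1 MiB chunks, the first chunk is full-sized and the second
        // ends at the file boundary.
        assert!(uses_chunk_size(&manifest, 1 << 20));
        // With 2 MiB chunks, the first chunk is neither full-sized nor
        // file-terminating, so the check fails.
        assert!(!uses_chunk_size(&manifest, 1 << 21));
    }
}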
// Computes file_table and chunk_table of a manifest using a parallel algorithm.
// All the parallel work is spawned in the specified thread pool.
fn build_chunk_table_parallel(
thread_pool: &mut scoped_threadpool::Pool,
metrics: &ManifestMetrics,
log: &ReplicaLogger,
root: &Path,
files: Vec<FileWithSize>,
max_chunk_size: u32,
chunk_actions: Vec<ChunkAction>,
) -> (Vec<FileInfo>, Vec<ChunkInfo>) {
// Build a chunk table and file table filled with blank hashes.
let mut chunk_table: Vec<ChunkInfo> = {
let mut chunks = Vec::with_capacity(chunk_actions.len());
for (file_index, FileWithSize(_, size_bytes)) in files.iter().enumerate() {
let n = count_chunks(*size_bytes, max_chunk_size);
for i in 0..n {
let offset = i as u64 * max_chunk_size as u64;
let size_bytes = (size_bytes - offset).min(max_chunk_size as u64) as u32;
chunks.push(ChunkInfo {
file_index: file_index as u32,
offset,
size_bytes,
hash: [0; 32],
});
}
}
chunks
};
assert_eq!(chunk_table.len(), chunk_actions.len());
let mut file_table: Vec<FileInfo> = files
.into_iter()
.map(|FileWithSize(relative_path, size_bytes)| FileInfo {
relative_path,
size_bytes,
hash: [0; 32],
})
.collect();
// We cache the files that are currently being hashed to avoid opening them
// individually for each chunk. The values in the cache are weak references,
// so the last thread that has a strong reference will release the value
// and close the corresponding file.
// This way we keep the number of files opened at the same time
// low (it doesn't exceed the number of the threads).
let file_cache: Arc<Mutex<HashMap<u32, Weak<ScopedMmap>>>> =
Arc::new(Mutex::new(HashMap::new()));
// Compute real chunk hashes in parallel.
// NB. We must populate hashes of all the chunks in a file before we compute
// file hashes.
thread_pool.scoped(|scope| {
for (chunk_idx, chunk_info) in chunk_table.iter_mut().enumerate() {
let chunk_action = chunk_actions[chunk_idx].clone();
let file_path = root.join(&file_table[chunk_info.file_index as usize].relative_path);
let file_size = file_table[chunk_info.file_index as usize].size_bytes;
let file_cache = Arc::clone(&file_cache);
scope.execute(move || {
let recompute_chunk_hash = || {
let mmap: Arc<ScopedMmap> = if file_size > max_chunk_size as u64 {
// We only use the file cache if there is more than one chunk in the file,
// otherwise the synchronization cost is unnecessary.
let mut cache = file_cache.lock().unwrap();
match cache.get(&chunk_info.file_index).and_then(Weak::upgrade) {
Some(mmap) => mmap,
None => {
let mmap = Arc::new(
ScopedMmap::from_path(&file_path)
.unwrap_or_else(|e| fatal!(log, "failed to mmap file {}: {}", file_path.display(), e)),
);
cache.insert(chunk_info.file_index, Arc::downgrade(&mmap));
mmap
}
}
} else {
Arc::new(
ScopedMmap::from_path(&file_path)
.unwrap_or_else(|e| fatal!(log, "failed to mmap file {}: {}", file_path.display(), e))
)
};
let data = mmap.as_slice();
let mut hasher = chunk_hasher();
let chunk_start = chunk_info.offset as usize;
let chunk_end = chunk_start + chunk_info.size_bytes as usize;
hasher.write(&data[chunk_start..chunk_end]);
hasher.finish()
};
chunk_info.hash = match chunk_action {
ChunkAction::Recompute => {
metrics.chunk_bytes.with_label_values(&[LABEL_VALUE_HASHED]).inc_by(chunk_info.size_bytes as u64);
recompute_chunk_hash()
},
ChunkAction::RecomputeAndCompare(precomputed_hash) => {
metrics.chunk_bytes.with_label_values(&[LABEL_VALUE_HASHED_AND_COMPARED]).inc_by(chunk_info.size_bytes as u64);
let recomputed_hash = recompute_chunk_hash();
debug_assert_eq!(recomputed_hash, precomputed_hash);
if recomputed_hash != precomputed_hash {
metrics.reused_chunk_hash_error_count.inc();
error!(
log,
"{}: Hash mismatch in chunk with index {} in file {}, recomputed hash {:?}, reused hash {:?}",
CRITICAL_ERROR_REUSED_CHUNK_HASH,
chunk_idx,
file_path.display(),
recomputed_hash,
precomputed_hash
);
}
recomputed_hash
}
ChunkAction::UseHash(precomputed_hash) => {
metrics.chunk_bytes.with_label_values(&[LABEL_VALUE_REUSED]).inc_by(chunk_info.size_bytes as u64);
precomputed_hash
},
};
});
}
});
// After we computed all the chunk hashes, we can finally compute file hashes.
for (file_index, file_info) in file_table.iter_mut().enumerate() {
let mut hasher = file_hasher();
let chunk_range = file_chunk_range(&chunk_table, file_index);
(chunk_range.len() as u32).update_hash(&mut hasher);
for chunk_idx in chunk_range {
write_chunk_hash(&mut hasher, &chunk_table[chunk_idx])
}
file_info.hash = hasher.finish();
}
(file_table, chunk_table)
}
/// Builds the file table and chunk table sequentially. Only compiled in debug
/// builds, where it cross-checks the result of `build_chunk_table_parallel`.
#[cfg(debug_assertions)]
fn build_chunk_table_sequential(
metrics: &ManifestMetrics,
log: &ReplicaLogger,
root: &Path,
files: Vec<FileWithSize>,
max_chunk_size: u32,
chunk_actions: Vec<ChunkAction>,
) -> (Vec<FileInfo>, Vec<ChunkInfo>) {
let mut chunk_table = Vec::new();
let mut file_table = Vec::new();
let mut chunk_index: usize = 0;
for (file_index, FileWithSize(relative_path, size_bytes)) in files.into_iter().enumerate() {
let mut file_hash = file_hasher();
let mut bytes_left = size_bytes;
let num_chunks = count_chunks(size_bytes, max_chunk_size);
(num_chunks as u32).update_hash(&mut file_hash);
let compute_file_chunk_hashes = |data: &[u8]| {
// It's OK to not have any chunks for 0-sized files (though it's unlikely that
// we have any).
while bytes_left > 0 {
let chunk_size = bytes_left.min(max_chunk_size as u64);
let offset = size_bytes - bytes_left;
let recompute_chunk_hash = || {
let mut hasher = chunk_hasher();
hasher.write(&data[offset as usize..(offset + chunk_size) as usize]);
hasher.finish()
};
assert!(chunk_index < chunk_actions.len());
let chunk_hash = match chunk_actions[chunk_index] {
ChunkAction::RecomputeAndCompare(reused_chunk_hash) => {
metrics
.chunk_bytes
.with_label_values(&[LABEL_VALUE_HASHED_AND_COMPARED])
.inc_by(chunk_size);
// We have both a reused and a recomputed hash, so we can compare them to
// monitor for issues
let recomputed_chunk_hash = recompute_chunk_hash();
debug_assert_eq!(recomputed_chunk_hash, reused_chunk_hash);
if recomputed_chunk_hash != reused_chunk_hash {
metrics.reused_chunk_hash_error_count.inc();
error!(
log,
"{}: Hash mismatch in chunk with index {} in file {}, recomputed hash {:?}, reused hash {:?}",
CRITICAL_ERROR_REUSED_CHUNK_HASH,
chunk_index,
relative_path.display(),
recomputed_chunk_hash,
reused_chunk_hash
);
}
recomputed_chunk_hash
}
ChunkAction::UseHash(reused_chunk_hash) => {
metrics
.chunk_bytes
.with_label_values(&[LABEL_VALUE_REUSED])
.inc_by(chunk_size);
reused_chunk_hash
}
ChunkAction::Recompute => {
metrics
.chunk_bytes
.with_label_values(&[LABEL_VALUE_HASHED])
.inc_by(chunk_size);
recompute_chunk_hash()
}
};
let chunk_info = ChunkInfo {
file_index: file_index as u32,
size_bytes: chunk_size as u32,
offset,
hash: chunk_hash,
};
write_chunk_hash(&mut file_hash, &chunk_info);
chunk_table.push(chunk_info);
bytes_left -= chunk_size;
chunk_index += 1;
}
file_table.push(FileInfo {
relative_path: relative_path.clone(),
size_bytes,
hash: file_hash.finish(),
});
};
let mmap = ScopedMmap::from_path(root.join(&relative_path)).expect("failed to open file");
let data = mmap.as_slice();
compute_file_chunk_hashes(data);
}
assert_eq!(chunk_table.len(), chunk_actions.len());
(file_table, chunk_table)
}
/// Traverses root recursively and populates the `files` vector with entries of
/// the form `(relative_file_name, file_len)`.
fn files_with_sizes(
root: &Path,
relative_path: PathBuf,
files: &mut Vec<FileWithSize>,
) -> Result<(), CheckpointError> {
let absolute_path = root.join(&relative_path);
let metadata = absolute_path
.metadata()
.map_err(|io_err| CheckpointError::IoError {
path: absolute_path.clone(),
message: "failed to get metadata".to_string(),
io_err: io_err.to_string(),
})?;
if metadata.is_file() {
files.push(FileWithSize(relative_path, metadata.len()))
} else {
if relative_path.ends_with("slot_db") {
return Ok(());
}
assert!(
metadata.is_dir(),
"Checkpoints must not contain special files, found one at {}",
absolute_path.display()
);
for entry_result in absolute_path
.read_dir()
.map_err(|io_err| CheckpointError::IoError {
path: absolute_path.clone(),
message: "failed to read dir".to_string(),
io_err: io_err.to_string(),
})?
{
let entry = entry_result.map_err(|io_err| CheckpointError::IoError {
path: absolute_path.clone(),
message: "failed to read dir entry".to_string(),
io_err: io_err.to_string(),
})?;
files_with_sizes(root, relative_path.join(entry.file_name()), files)?;
}
}
Ok(())
}
/// Returns the range of chunks belonging to the file with the specified index.
///
/// If the file is empty and doesn't have any chunks, returns an empty range.
pub fn file_chunk_range(chunk_table: &[ChunkInfo], file_index: usize) -> Range<usize> {
let start = chunk_table.partition_point(|c| (c.file_index as usize) < file_index);
let end = chunk_table.partition_point(|c| (c.file_index as usize) < file_index + 1);
start..end
}
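// A small illustration of how the two `partition_point` calls above bracket a
// file's chunks; hypothetical data, not part of the original module.
#[cfg(test)]
mod file_chunk_range_example {
    use super::*;

    fn chunk(file_index: u32, offset: u64) -> ChunkInfo {
        ChunkInfo {
            file_index,
            size_bytes: 0,
            offset,
            hash: [0; 32],
        }
    }

    #[test]
    fn brackets_each_files_chunks() {
        // Chunks sorted by (file_index, offset): file 0 owns two chunks,
        // file 1 owns one, and file 2 (an empty file) owns none.
        let table = vec![chunk(0, 0), chunk(0, 1), chunk(1, 0)];
        assert_eq!(file_chunk_range(&table, 0), 0..2);
        assert_eq!(file_chunk_range(&table, 1), 2..3);
        assert_eq!(file_chunk_range(&table, 2), 3..3);
    }
}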
/// Makes a "hash plan": an instruction how to compute the hash of each chunk of
/// the new manifest.
fn hash_plan(
base_manifest: &Manifest,
files: &[FileWithSize],
dirty_file_chunks: BTreeMap<PathBuf, BitVec>,
max_chunk_size: u32,
seed: u64,
rehash_every_nth: u64,
) -> Vec<ChunkAction> {
// Even if we could reuse all chunks, we want to ensure that we sometimes still
// recompute them anyway to not propagate errors indefinitely. We choose a
// uniformly random offset in [0, rehash_every_nth - 1] and recompute any chunks
// with ((chunk_index + offset) % rehash_every_nth) == 0. The sampling is done
// using an rng so that it's not always the same chunks but seeded
// deterministically. We want to ensure that all replicas have the same hash
// plan, as otherwise a replica that detects an error might not be able to
// sway consensus. At the same time, we do not require unpredictability
// here, as long as we can guarantee that we find faulty chunks within
// rehash_every_nth checkpoints in expectation.
let mut rng = ChaChaRng::seed_from_u64(seed);
let rehash_every_nth = rehash_every_nth.max(1); // 0 will behave like 1
let offset = rng.gen_range(0..rehash_every_nth);
debug_assert!(uses_chunk_size(base_manifest, max_chunk_size));
let mut chunk_actions: Vec<ChunkAction> = Vec::new();
for FileWithSize(relative_path, size_bytes) in files.iter() {
let num_chunks = count_chunks(*size_bytes, max_chunk_size);
let compute_dirty_chunk_bitmap = || -> Option<(&BitVec, usize)> {
let dirty_chunk_bitmap = dirty_file_chunks.get(relative_path)?;
let base_file_index = base_manifest
.file_table
.binary_search_by_key(&relative_path, |file_info| &file_info.relative_path)
.ok()?;
// The chunk table contains chunks from all files and hence `base_index` is
// needed to know the absolute index. The chunk table is sorted by
// `file_index` and then `offset`. Therefore binary search can be used to find
// the first chunk index of a file.
let base_index = base_manifest
.chunk_table
.binary_search_by(|chunk_info| {
chunk_info
.file_index
.cmp(&(base_file_index as u32))
.then_with(|| chunk_info.offset.cmp(&0u64))
})
.ok()?;
Some((dirty_chunk_bitmap, base_index))
};
if let Some((dirty_chunk_bitmap, base_index)) = compute_dirty_chunk_bitmap() {
debug_assert_eq!(num_chunks, dirty_chunk_bitmap.len());
for i in 0..num_chunks {
let action = if dirty_chunk_bitmap[i] {
ChunkAction::Recompute
} else {
let chunk = &base_manifest.chunk_table[base_index + i];
debug_assert_eq!(
&base_manifest.file_table[chunk.file_index as usize].relative_path,
relative_path
);
debug_assert_eq!(chunk.offset, i as u64 * max_chunk_size as u64);
debug_assert_eq!(
chunk.size_bytes as u64,
(size_bytes - chunk.offset).min(max_chunk_size as u64)
);
// We are using chunk_actions.len() as shorthand for the chunk_index.
let offset_index = (chunk_actions.len() as u64).wrapping_add(offset);
if (offset_index % rehash_every_nth) == 0 {
ChunkAction::RecomputeAndCompare(chunk.hash)
} else {
ChunkAction::UseHash(chunk.hash)
}
};
chunk_actions.push(action);
}
} else {
for _ in 0..num_chunks {
chunk_actions.push(ChunkAction::Recompute);
}
}
}
chunk_actions
}
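// The determinism argument in the comment above is easy to check in
// isolation: two replicas seeding ChaCha with the same value draw the same
// rehash offset and therefore schedule `RecomputeAndCompare` for the same
// chunks. Illustrative test, not part of the original module.
#[cfg(test)]
mod rehash_offset_example {
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaChaRng;

    #[test]
    fn same_seed_yields_same_offset() {
        let rehash_every_nth: u64 = 10;
        let offset_a = ChaChaRng::seed_from_u64(42).gen_range(0..rehash_every_nth);
        let offset_b = ChaChaRng::seed_from_u64(42).gen_range(0..rehash_every_nth);
        assert_eq!(offset_a, offset_b);
        assert!(offset_a < rehash_every_nth);
    }
}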
/// Returns the trivial hash plan that instructs the caller to recompute hashes
/// of all the chunks.
fn default_hash_plan(files: &[FileWithSize], max_chunk_size: u32) -> Vec<ChunkAction> {
let chunks_total: usize = files
.iter()
.map(|FileWithSize(_, size_bytes)| count_chunks(*size_bytes, max_chunk_size))
.sum();
vec![ChunkAction::Recompute; chunks_total]
}
fn dirty_chunks_of_file(
relative_path: &Path,
page_indices: &[PageIndex],
files: &[FileWithSize],
max_chunk_size: u32,
base_manifest: &Manifest,
) -> Option<BitVec> {
if let Ok(index) =
files.binary_search_by(|FileWithSize(file_path, _)| file_path.as_path().cmp(relative_path))
{
let size_bytes = files[index].1;
let num_chunks = count_chunks(size_bytes, max_chunk_size);
let mut chunks_bitmap = BitVec::from_elem(num_chunks, false);
for page_index in page_indices {
// As the chunk size is a multiple of the page size, at most one chunk could
// possibly be affected.
let chunk_index = PAGE_SIZE * page_index.get() as usize / max_chunk_size as usize;
chunks_bitmap.set(chunk_index, true);
}
// NB. The code below handles the case when the file size increased, but the
// dirty pages do not cover the new area. This should not happen in the current
// implementation of PageMap, but we don't want to rely too much on these
// implementation details. So we mark the expanded area as dirty explicitly
// instead.
let base_file_index = base_manifest
.file_table
.binary_search_by(|file_info| file_info.relative_path.as_path().cmp(relative_path));
// This should never happen under normal operation. However, disaster recovery can add
// files into checkpoints, so we relax the check in production and return None if the file
// is missing in the base manifest. This triggers full re-hashing of the corresponding
// file.
debug_assert!(
base_file_index.is_ok(),
"could not find file {} in the base manifest",
relative_path.display()
);
let base_file_index = base_file_index.ok()?;
let base_file_size = base_manifest.file_table[base_file_index].size_bytes;
if base_file_size < size_bytes {
let from_chunk = count_chunks(base_file_size, max_chunk_size).max(1) - 1;
for i in from_chunk..num_chunks {
chunks_bitmap.set(i, true);
}
}
Some(chunks_bitmap)
} else {
None
}
}
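// A back-of-the-envelope check of the page-to-chunk mapping used above
// (illustrative test, not part of the original module). It assumes the
// production values of 4 KiB pages and 1 MiB chunks, i.e. 256 pages per
// chunk: page 255 still belongs to chunk 0, while page 256 starts chunk 1.
#[cfg(test)]
mod page_to_chunk_example {
    use super::PAGE_SIZE;

    #[test]
    fn pages_map_to_expected_chunks() {
        let max_chunk_size: usize = 1 << 20; // 1 MiB, the production value
        let pages_per_chunk = max_chunk_size / PAGE_SIZE;
        assert_eq!(pages_per_chunk, 256);
        assert_eq!(PAGE_SIZE * (pages_per_chunk - 1) / max_chunk_size, 0);
        assert_eq!(PAGE_SIZE * pages_per_chunk / max_chunk_size, 1);
    }
}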
/// Computes the bitmap of chunks modified since the base state.
fn dirty_pages_to_dirty_chunks(
manifest_delta: &ManifestDelta,
checkpoint: &CheckpointLayout<ReadOnly>,
files: &[FileWithSize],
max_chunk_size: u32,
) -> Result<BTreeMap<PathBuf, BitVec>, CheckpointError> {
debug_assert!(uses_chunk_size(
&manifest_delta.base_manifest,
max_chunk_size
));
// The `max_chunk_size` is currently set to 1 MiB, so the assertion below holds.
// Note that the code does not currently support changing `max_chunk_size`
// without adding explicit code for backward compatibility.
assert_eq!(
max_chunk_size as usize % PAGE_SIZE,
0,
"chunk size must be a multiple of page size for incremental computation to work correctly"
);
let mut dirty_chunks: BTreeMap<PathBuf, BitVec> = Default::default();
for dirty_page in &manifest_delta.dirty_memory_pages {
if dirty_page.height != manifest_delta.base_height {
continue;
}
let path = match dirty_page.file_type {
FileType::PageMap(page_type) => page_type.path(checkpoint),
FileType::WasmBinary(canister_id) => {
assert!(dirty_page.page_delta_indices.is_empty());
checkpoint
.canister(&canister_id)
.map(|can| can.wasm().raw_path().to_owned())
}
};
if let Ok(path) = path {
let relative_path = path
.strip_prefix(checkpoint.raw_path())
.expect("failed to strip path prefix");
if let Some(chunks_bitmap) = dirty_chunks_of_file(
relative_path,
&dirty_page.page_delta_indices,
files,
max_chunk_size,
&manifest_delta.base_manifest,
) {
dirty_chunks.insert(relative_path.to_path_buf(), chunks_bitmap);
}
}
}
Ok(dirty_chunks)
}
/// Computes the manifest for the checkpoint described by `checkpoint`.
pub fn compute_manifest(
thread_pool: &mut scoped_threadpool::Pool,
metrics: &ManifestMetrics,
log: &ReplicaLogger,
version: u32,
checkpoint: &CheckpointLayout<ReadOnly>,
max_chunk_size: u32,
opt_manifest_delta: Option<ManifestDelta>,
) -> Result<Manifest, CheckpointError> {
let mut files = Vec::new();
files_with_sizes(checkpoint.raw_path(), "".into(), &mut files)?;
// We sort the table to make sure that the table is the same on all replicas
files.sort_unstable_by(|lhs, rhs| lhs.0.cmp(&rhs.0));
let chunk_actions = match opt_manifest_delta {
Some(manifest_delta) => {
// We have to check that the old manifest uses exactly the same chunk size.
// Otherwise, if someone decides to change the chunk size in future,
// all the tests are going to pass (because all of them will use the
// new chunk size), but the manifest might be computed incorrectly
// on the mainnet.
if uses_chunk_size(&manifest_delta.base_manifest, max_chunk_size) {
let dirty_file_chunks = dirty_pages_to_dirty_chunks(
&manifest_delta,
checkpoint,
&files,
max_chunk_size,
)?;
hash_plan(
&manifest_delta.base_manifest,
&files,
dirty_file_chunks,
max_chunk_size,
manifest_delta.target_height.get(),
REHASH_EVERY_NTH_CHUNK,
)
} else {
default_hash_plan(&files, max_chunk_size)
}
}
None => default_hash_plan(&files, max_chunk_size),
};
#[cfg(debug_assertions)]
let (seq_file_table, seq_chunk_table) = {
let metrics_registry = ic_metrics::MetricsRegistry::new();
let metrics = ManifestMetrics::new(&metrics_registry);
build_chunk_table_sequential(
&metrics,
log,
checkpoint.raw_path(),
files.clone(),
max_chunk_size,
chunk_actions.clone(),
)
};
let (file_table, chunk_table) = build_chunk_table_parallel(
thread_pool,
metrics,
log,
checkpoint.raw_path(),
files,
max_chunk_size,
chunk_actions,
);
#[cfg(debug_assertions)]
{
assert_eq!(file_table, seq_file_table);
assert_eq!(chunk_table, seq_chunk_table);
}
let manifest = Manifest::new(version, file_table, chunk_table);
metrics
.manifest_size
.set(encode_manifest(&manifest).len() as i64);
if manifest.chunk_table.len() > FILE_GROUP_CHUNK_ID_OFFSET as usize / 2 {
warn!(
log,
"The chunk table is longer than half of the available chunk ID space in state sync. chunk table length: {}, state sync max chunk id: {}",
manifest.chunk_table.len(),
FILE_GROUP_CHUNK_ID_OFFSET - 1,
);
}
Ok(manifest)
}
/// Validates manifest contents and checks that the hash of the manifest matches
/// the expected root hash.
pub fn validate_manifest(
manifest: &Manifest,
root_hash: &CryptoHashOfState,
) -> Result<(), ManifestValidationError> {
if manifest.version > MAX_SUPPORTED_STATE_SYNC_VERSION {
return Err(ManifestValidationError::UnsupportedManifestVersion {
manifest_version: manifest.version,
max_supported_version: MAX_SUPPORTED_STATE_SYNC_VERSION,
});
}
let mut chunk_start: usize = 0;
for (file_index, f) in manifest.file_table.iter().enumerate() {
let mut hasher = file_hasher();
let chunk_count: usize = manifest.chunk_table[chunk_start..]
.iter()
.take_while(|chunk| chunk.file_index as usize == file_index)
.count();
(chunk_count as u32).update_hash(&mut hasher);
for chunk_info in manifest.chunk_table[chunk_start..chunk_start + chunk_count].iter() {
assert_eq!(chunk_info.file_index, file_index as u32);
write_chunk_hash(&mut hasher, chunk_info);
}
chunk_start += chunk_count;
let hash = hasher.finish();
if hash != f.hash {
return Err(ManifestValidationError::InvalidFileHash {
relative_path: f.relative_path.clone(),
expected_hash: f.hash.to_vec(),
actual_hash: hash.to_vec(),
});
}
}
let hash = manifest_hash(manifest);
if root_hash.get_ref().0 != hash {
return Err(ManifestValidationError::InvalidRootHash {
expected_hash: root_hash.get_ref().0.clone(),
actual_hash: hash.to_vec(),
});
}
Ok(())
}
/// Checks that the size and hash of the received chunk match the chunk table of
/// the manifest.
pub fn validate_chunk(
ix: usize,
bytes: &[u8],
manifest: &Manifest,
) -> Result<(), ChunkValidationError> {
let chunk = &manifest.chunk_table[ix];
let expected_size = chunk.size_bytes as usize;
if bytes.len() != expected_size {
return Err(ChunkValidationError::InvalidChunkSize {
chunk_ix: ix,
expected_size,
actual_size: bytes.len(),
});
}
let mut hasher = chunk_hasher();
let hash = {
hasher.write(bytes);
hasher.finish()
};
if hash != chunk.hash {
    return Err(ChunkValidationError::InvalidChunkHash {
        chunk_ix: ix,
        expected_hash: chunk.hash.to_vec(),
        actual_hash: hash.to_vec(),
    });
}
Ok(())
}