forked from gochain/gochain
/
file_segment.go
726 lines (616 loc) · 18.4 KB
/
file_segment.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
package ethdb
import (
"bufio"
"bytes"
"context"
"encoding/binary"
"encoding/hex"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"github.com/cespare/xxhash"
"github.com/edsrzf/mmap-go"
"github.com/gochain-io/gochain/common"
"github.com/gochain-io/gochain/log"
)
// Sentinel errors returned by segment operations; compare with errors.Is.
var (
	// ErrImmutableSegment signals a mutation attempt on a read-only segment.
	ErrImmutableSegment = errors.New("ethdb: immutable segment")
	// ErrSegmentTypeUnknown signals a segment file whose type byte/magic is unrecognized.
	ErrSegmentTypeUnknown = errors.New("ethdb: segment type unknown")
	// ErrFileSegmentChecksumMismatch signals that the stored and computed checksums differ.
	ErrFileSegmentChecksumMismatch = errors.New("ethdb: file segment checksum mismatch")
)
// On-disk header layout: magic (4) | checksum (8) | index offset (8) |
// index count (8) | index capacity (8) = 36 bytes total.
const (
	// FileSegmentMagic is the magic number at the beginning of the file segment.
	FileSegmentMagic = "ETH1"
	// FileSegmentChecksumSize is the size of the checksum, in bytes.
	FileSegmentChecksumSize = 8
	// FileSegmentIndexOffsetSize is the size of the index offset, in bytes.
	FileSegmentIndexOffsetSize = 8
	// FileSegmentIndexCountSize is the size of the index element count, in bytes.
	FileSegmentIndexCountSize = 8
	// FileSegmentIndexCapacitySize is the size of the index capacity, in bytes.
	FileSegmentIndexCapacitySize = 8
	// FileSegmentHeaderSize is the total size of the fixed length FileSegment header.
	FileSegmentHeaderSize = 0 +
		len(FileSegmentMagic) +
		FileSegmentChecksumSize +
		FileSegmentIndexOffsetSize +
		FileSegmentIndexCountSize +
		FileSegmentIndexCapacitySize
)
// Ensure implementation implements interface.
var _ Segment = (*FileSegment)(nil)

// FileSegment represents an immutable key/value file segment for a table.
// The file is accessed through a read-only memory map held in data;
// a nil data slice means the segment is not open.
type FileSegment struct {
	name string // segment name
	path string // on-disk path
	data []byte // memory-mapped data
}
// NewFileSegment returns a new instance of FileSegment.
// The segment is not opened; call Open() before use.
func NewFileSegment(name, path string) *FileSegment {
	s := &FileSegment{}
	s.name = name
	s.path = path
	return s
}
// Open opens and initializes the file segment.
//
// The file is memory-mapped read-only; the file handle is closed on
// return since the mapping keeps the data accessible. Returns an error
// if the file cannot be opened or mapped, or if the header is shorter
// than FileSegmentHeaderSize or does not start with FileSegmentMagic.
func (s *FileSegment) Open() error {
	file, err := os.Open(s.path)
	if err != nil {
		log.Error("Cannot open file segment", "path", s.path, "err", err)
		return err
	}
	defer file.Close()
	// Memory-map data.
	data, err := mmap.Map(file, mmap.RDONLY, 0)
	if err != nil {
		log.Error("Cannot mmap file segment", "path", s.path, "err", err)
		return err
	}
	s.data = []byte(data)
	// Ensure header information is valid; Close() unmaps on failure.
	if len(data) < FileSegmentHeaderSize {
		s.Close()
		return errors.New("ethdb: file header too short")
	} else if string(data[:len(FileSegmentMagic)]) != FileSegmentMagic {
		s.Close()
		return errors.New("ethdb: invalid ethdb file")
	}
	return nil
}
// Close releases the segment's memory map, if one is held.
// Safe to call on an unopened or already-closed segment.
func (s *FileSegment) Close() error {
	if s.data == nil {
		return nil
	}
	if err := (*mmap.MMap)(&s.data).Unmap(); err != nil {
		return err
	}
	s.data = nil
	return nil
}
// Name returns the name of the segment.
func (s *FileSegment) Name() string { return s.name }

// Path returns the path of the segment.
func (s *FileSegment) Path() string { return s.path }

// Size returns the size of the underlying data file (0 if not open).
func (s *FileSegment) Size() int {
	return len(s.data)
}

// Data returns the underlying mmap data. The slice aliases the memory
// map and is only valid until Close() is called.
func (s *FileSegment) Data() []byte {
	return s.data
}
// Checksum returns the checksum written to the segment file, or nil if
// the data is too short to contain one.
//
// The checksum field sits immediately after the magic bytes. Use the
// named header constants instead of the previous hard-coded 4:12 slice
// so the offsets stay in sync with the header layout.
func (s *FileSegment) Checksum() []byte {
	if len(s.data) < len(FileSegmentMagic)+FileSegmentChecksumSize {
		return nil
	}
	return s.data[len(FileSegmentMagic) : len(FileSegmentMagic)+FileSegmentChecksumSize]
}
// Len returns the number of keys in the file, or 0 if the segment is
// not open. Reads the count field of the header; assumes the header was
// validated by Open().
func (s *FileSegment) Len() int {
	if s.data == nil {
		return 0
	}
	// Skip magic + checksum + index offset to reach the count field.
	data := s.data[len(FileSegmentMagic)+FileSegmentChecksumSize+FileSegmentIndexOffsetSize:]
	return int(binary.BigEndian.Uint64(data[:FileSegmentIndexCountSize]))
}
// Index returns the byte slice containing the hash index, or nil if the
// segment is not open.
func (s *FileSegment) Index() []byte {
	if s.data == nil {
		return nil
	}
	start := s.IndexOffset()
	return s.data[start:]
}
// IndexOffset returns the file offset where the index starts, or -1 if
// the segment is not open.
func (s *FileSegment) IndexOffset() int64 {
	if s.data == nil {
		return -1
	}
	field := s.data[len(FileSegmentMagic)+FileSegmentChecksumSize:]
	return int64(binary.BigEndian.Uint64(field))
}
// Cap returns the capacity (slot count) of the index, or 0 if the
// segment is not open. The capacity is a power of two (see pow2).
func (s *FileSegment) Cap() int {
	if s.data == nil {
		return 0
	}
	// Skip magic + checksum + index offset + count to reach the capacity field.
	data := s.data[len(FileSegmentMagic)+FileSegmentChecksumSize+FileSegmentIndexOffsetSize+FileSegmentIndexCountSize:]
	return int(binary.BigEndian.Uint64(data[:FileSegmentIndexCapacitySize]))
}
// Has returns true if the key exists in the segment. The error result
// is always nil; it exists to satisfy the Segment interface.
func (s *FileSegment) Has(key []byte) (bool, error) {
	keyOffset, _ := s.offset(key)
	return keyOffset != 0, nil
}
// Get returns the value of the given key, or common.ErrNotFound if the
// key does not exist.
func (s *FileSegment) Get(key []byte) ([]byte, error) {
	// Log the offending key before re-panicking so out-of-bounds reads
	// from corrupt/truncated segment files can be diagnosed.
	defer func() {
		if r := recover(); r != nil {
			log.Error("Cannot read key in file segment", "path", s.path, "key", fmt.Sprintf("%x", key))
			panic(r)
		}
	}()
	_, voff := s.offset(key)
	if voff == 0 {
		return nil, common.ErrNotFound
	}
	// Read value: uvarint length prefix followed by the value bytes.
	data := s.data[voff:]
	n, sz := binary.Uvarint(data)
	// Full slice expression caps capacity at the value end; CopyBytes
	// detaches the result from the mmap.
	return common.CopyBytes(data[sz : sz+int(n) : sz+int(n)]), nil
}
// Iterator returns an iterator for iterating over all key/value pairs.
//
// If the segment is not open, IndexOffset() returns -1 and the original
// code would panic slicing s.data[:-1]; instead return an exhausted
// iterator whose Next() immediately reports false.
func (s *FileSegment) Iterator() SegmentIterator {
	if s.data == nil {
		return &FileSegmentIterator{}
	}
	return &FileSegmentIterator{
		data:   s.data[:s.IndexOffset()],
		offset: int64(FileSegmentHeaderSize),
	}
}
// offset returns the offset of key & value. Returns 0 if key does not exist.
//
// Lookup uses Robin Hood hashing over the on-disk index: linear probing
// from the key's home slot, stopping early once the probe distance
// exceeds that of the resident entry (the key could not have been
// placed further away — see the CS-86-14 reference on the index type).
func (s *FileSegment) offset(key []byte) (koff, voff int64) {
	capacity := uint64(s.Cap())
	if capacity == 0 {
		return 0, 0
	}
	mask := capacity - 1 // capacity is a power of two, so mask is valid
	idx := s.Index()
	hash := hashKey(key)
	pos := hash & mask
	for d := uint64(0); ; d++ {
		// Exit if empty slot found (0 marks an empty slot).
		offset := int64(binary.BigEndian.Uint64(idx[pos*8:]))
		if offset == 0 {
			return 0, 0
		}
		// Read current key & compute hash.
		data := s.data[offset:]
		n, sz := binary.Uvarint(data)
		curr := data[sz : sz+int(n)]
		currHash := hashKey(curr)
		// Exit if distance exceeds current slot or key matches.
		if d > dist(currHash, pos, capacity, mask) {
			return 0, 0
		} else if currHash == hash && bytes.Equal(curr, key) {
			// Value immediately follows the length-prefixed key.
			return offset, offset + int64(sz) + int64(n)
		}
		pos = (pos + 1) & mask
	}
}
// Ensure implementation implements interface.
var _ SegmentIterator = (*FileSegmentIterator)(nil)

// FileSegmentIterator is an iterator for sequentially reading a
// FileSegment's key/value pairs from its data region.
type FileSegmentIterator struct {
	data   []byte // data region of the segment (header..index start)
	offset int64  // read position within data
	key    []byte // current key; aliases data
	value  []byte // current value; aliases data
}
// Close releases the iterator and clears all internal state.
func (itr *FileSegmentIterator) Close() error {
	itr.data = nil
	itr.offset = 0
	itr.key = nil
	itr.value = nil
	return nil
}
// Key returns the current key. Must be called after Next().
func (itr *FileSegmentIterator) Key() []byte { return itr.key }

// Value returns the current value. Must be called after Next().
func (itr *FileSegmentIterator) Value() []byte { return itr.value }
// Next reads the next key/value pair into the buffer. Returns false
// once the offset reaches the end of the data region.
//
// Each pair is stored as uvarint(len(key)) key uvarint(len(value)) value.
// The key/value slices alias the underlying mmap and remain valid only
// until the segment is closed.
func (itr *FileSegmentIterator) Next() bool {
	if itr.offset >= int64(len(itr.data)) {
		return false
	}
	// Read key.
	n, sz := binary.Uvarint(itr.data[itr.offset:])
	itr.key = itr.data[itr.offset+int64(sz) : itr.offset+int64(sz+int(n))]
	itr.offset += int64(sz + int(n))
	// Read value.
	n, sz = binary.Uvarint(itr.data[itr.offset:])
	itr.value = itr.data[itr.offset+int64(sz) : itr.offset+int64(sz+int(n))]
	itr.offset += int64(sz + int(n))
	return true
}
// FileSegmentOpener initializes and opens file segments. It is
// stateless; the zero value is usable.
type FileSegmentOpener struct{}

// NewFileSegmentOpener returns a new instance of FileSegmentOpener.
func NewFileSegmentOpener() *FileSegmentOpener {
	return &FileSegmentOpener{}
}
// ListSegmentNames returns a list of all segment names for a table.
// Entries with a file extension (e.g. ".tmp") are skipped; segment
// files are expected to be extensionless.
func (o *FileSegmentOpener) ListSegmentNames(path, table string) ([]string, error) {
	infos, err := ioutil.ReadDir(path)
	if err != nil {
		log.Error("Cannot list file segments", "path", path, "table", table, "err", err)
		return nil, err
	}
	var names []string
	for _, info := range infos {
		if filepath.Ext(info.Name()) == "" {
			names = append(names, info.Name())
		}
	}
	return names, nil
}
// OpenSegment returns an initialized and opened segment.
// Returns ErrSegmentTypeUnknown for any file type other than ETH1.
func (o *FileSegmentOpener) OpenSegment(table, name, path string) (Segment, error) {
	// Determine the segment file type.
	typ, err := SegmentFileType(path)
	if err != nil {
		return nil, err
	}
	switch typ {
	case SegmentETH1:
		segment := NewFileSegment(name, path)
		if err := segment.Open(); err != nil {
			return nil, err
		}
		return segment, nil
	default:
		return nil, ErrSegmentTypeUnknown
	}
}
// FileSegmentCompactor locally compacts LDB segments into file segments.
// It is stateless; the zero value is usable.
type FileSegmentCompactor struct{}

// NewFileSegmentCompactor returns a new instance of FileSegmentCompactor.
func NewFileSegmentCompactor() *FileSegmentCompactor {
	return &FileSegmentCompactor{}
}
// CompactSegment compacts an LDB segment into a file segment.
//
// The data is first written to a temporary ".tmp" sibling file, the LDB
// segment is closed, and the temporary file is renamed over the
// original path, which is then reopened as a FileSegment.
func (c *FileSegmentCompactor) CompactSegment(ctx context.Context, table string, s *LDBSegment) (Segment, error) {
	tmpPath := s.Path() + ".tmp"
	if err := c.CompactSegmentTo(ctx, s, tmpPath); err != nil {
		return nil, err
	} else if err := s.Close(); err != nil {
		return nil, err
	} else if err := c.RenameSegment(ctx, s.Path(), tmpPath); err != nil {
		return nil, err
	}
	// Reopen as file segment.
	newSegment := NewFileSegment(s.Name(), s.Path())
	if err := newSegment.Open(); err != nil {
		return nil, err
	}
	return newSegment, nil
}
// CompactSegmentTo compacts an LDB segment to a specified path.
// On failure the partially written file at path is removed.
func (c *FileSegmentCompactor) CompactSegmentTo(ctx context.Context, s *LDBSegment, path string) error {
	err := s.CompactTo(path)
	if err != nil {
		os.Remove(path)
	}
	return err
}
// UncompactSegment converts a file segment back into an LDB segment.
//
// Mirrors CompactSegment: write to a ".tmp" sibling, close the source
// segment, rename over the original path, and reopen as an LDBSegment.
func (c *FileSegmentCompactor) UncompactSegment(ctx context.Context, table string, s Segment) (*LDBSegment, error) {
	tmpPath := s.Path() + ".tmp"
	if err := c.UncompactSegmentTo(ctx, s, tmpPath); err != nil {
		return nil, err
	} else if err := s.Close(); err != nil {
		return nil, err
	} else if err := c.RenameSegment(ctx, s.Path(), tmpPath); err != nil {
		return nil, err
	}
	// Reopen as LDB segment.
	newLDBSegment := NewLDBSegment(s.Name(), s.Path())
	if err := newLDBSegment.Open(); err != nil {
		return nil, err
	}
	return newLDBSegment, nil
}
// UncompactSegmentTo converts a segment back to an LDB segment at path.
// On failure the partially written file at path is removed.
func (c *FileSegmentCompactor) UncompactSegmentTo(ctx context.Context, s Segment, path string) error {
	err := UncompactSegmentTo(s, path)
	if err != nil {
		os.Remove(path)
	}
	return err
}
// RenameSegment removes dst and renames the segment at src to dst.
func (c *FileSegmentCompactor) RenameSegment(ctx context.Context, dst, src string) error {
	if err := os.RemoveAll(dst); err != nil {
		return err
	}
	return os.Rename(src, dst)
}
// FileSegmentEncoder represents an encoder for building an
// ethdb.FileSegment: Open, EncodeKeyValue repeatedly, Flush, Close.
type FileSegmentEncoder struct {
	f       *os.File
	flushed bool    // true once Flush() has been attempted
	offset  int64   // current write offset in the file
	offsets []int64 // frame offset of each encoded key/value pair

	// Filename of file segment to encode.
	Path string
}

// NewFileSegmentEncoder returns a new encoder that writes to path.
func NewFileSegmentEncoder(path string) *FileSegmentEncoder {
	return &FileSegmentEncoder{
		Path: path,
	}
}
// Open opens and initializes the output file segment.
//
// Creates (or truncates) the file at enc.Path, writes the magic bytes,
// and zero-fills the remainder of the fixed-size header so Flush() can
// back-fill the checksum and index fields later.
// NOTE(review): on a header-write error enc.f stays non-nil after
// Close(), so a retry of Open() reports "file already open" — confirm
// this one-shot behavior is intended.
func (enc *FileSegmentEncoder) Open() (err error) {
	if enc.f != nil {
		return errors.New("ethdb: file already open")
	}
	if enc.f, err = os.OpenFile(enc.Path, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666); err != nil {
		return err
	}
	// Write magic & leave space for checksum & index offset.
	if _, err := enc.f.Write([]byte(FileSegmentMagic)); err != nil {
		enc.Close()
		return err
	} else if _, err := enc.f.Write(make([]byte, FileSegmentHeaderSize-len(FileSegmentMagic))); err != nil {
		enc.Close()
		return err
	}
	enc.offset = int64(FileSegmentHeaderSize)
	return nil
}
// Close closes the file handle. File must be flushed before calling close.
func (enc *FileSegmentEncoder) Close() error {
	if enc.f == nil {
		return nil
	}
	return enc.f.Close()
}
// Flush finalizes the file segment: appends the hash index after the
// data region, back-fills the header fields, and writes the checksum.
//
// Note: flushed is set before the writes are attempted, so a failed
// Flush cannot be retried on the same encoder.
func (enc *FileSegmentEncoder) Flush() error {
	if enc.flushed {
		return errors.New("ethdb: file index already flushed")
	}
	enc.flushed = true

	if err := enc.writeIndex(); err != nil {
		return fmt.Errorf("ethdb: cannot write index: %s", err)
	} else if err := enc.writeChecksum(); err != nil {
		return fmt.Errorf("ethdb: cannot write checksum: %s", err)
	} else if err := enc.f.Sync(); err != nil {
		return err
	}
	return nil
}
// EncodeKeyValue writes framed key & value byte slices to the file and
// records the frame's starting offset for later index construction.
// Frame layout: uvarint(len(key)) key uvarint(len(value)) value.
func (enc *FileSegmentEncoder) EncodeKeyValue(key, value []byte) error {
	lenbuf := make([]byte, binary.MaxVarintLen64)
	frameOffset := enc.offset
	// Write each field as a uvarint length prefix followed by its bytes.
	for _, field := range [2][]byte{key, value} {
		n := binary.PutUvarint(lenbuf, uint64(len(field)))
		if err := enc.write(lenbuf[:n]); err != nil {
			return err
		}
		if err := enc.write(field); err != nil {
			return err
		}
	}
	enc.offsets = append(enc.offsets, frameOffset)
	return nil
}
// write appends b to the file and advances the running offset by the
// number of bytes actually written, even on a short write.
func (enc *FileSegmentEncoder) write(b []byte) error {
	written, err := enc.f.Write(b)
	enc.offset += int64(written)
	return err
}
// writeIndex builds the RHH hash index from the recorded key offsets,
// appends it to the file, and back-fills the index offset, count &
// capacity fields in the header.
func (enc *FileSegmentEncoder) writeIndex() error {
	// Save offset to the start of the index.
	indexOffset := enc.offset

	// Open separate handle to read on-disk data (keys) while enc.f
	// continues to be used for writing.
	f, err := os.Open(enc.Path)
	if err != nil {
		return err
	}
	defer f.Close()

	// Build index in-memory.
	idx := newFileSegmentEncoderIndex(f, len(enc.offsets))
	for _, offset := range enc.offsets {
		if err := idx.insert(offset); err != nil {
			return err
		}
	}

	// Encode index to writer.
	if _, err := idx.WriteTo(enc.f); err != nil {
		return err
	}

	// Write length, capacity & index offset to the header.
	hdr := make([]byte, FileSegmentIndexOffsetSize+FileSegmentIndexCountSize+FileSegmentIndexCapacitySize)
	binary.BigEndian.PutUint64(hdr[0:8], uint64(indexOffset))
	binary.BigEndian.PutUint64(hdr[8:16], uint64(len(enc.offsets)))
	binary.BigEndian.PutUint64(hdr[16:24], uint64(idx.capacity()))
	if _, err := enc.f.Seek(int64(len(FileSegmentMagic)+FileSegmentChecksumSize), io.SeekStart); err != nil {
		return err
	} else if _, err := enc.f.Write(hdr); err != nil {
		return err
	} else if err := enc.f.Sync(); err != nil {
		return err
	}
	return nil
}
// writeChecksum computes the checksum of the on-disk file (everything
// after the checksum field) and writes it into the header slot that
// immediately follows the magic bytes. Must run after writeIndex so the
// checksum covers the index.
func (enc *FileSegmentEncoder) writeChecksum() error {
	buf, err := ChecksumFileSegment(enc.Path)
	if err != nil {
		return err
	}
	if _, err := enc.f.Seek(int64(len(FileSegmentMagic)), io.SeekStart); err != nil {
		return err
	} else if _, err := enc.f.Write(buf); err != nil {
		return err
	} else if err := enc.f.Sync(); err != nil {
		return err
	}
	return nil
}
// ChecksumFileSegment calculates the xxhash checksum for the file
// segment at path, covering every byte after the checksum field, and
// returns it as a big-endian 8-byte slice.
func ChecksumFileSegment(path string) ([]byte, error) {
	// Open a dedicated read handle for the checksum pass.
	f, err := os.Open(path)
	if err != nil {
		return nil, err
	}
	defer f.Close()

	// Skip magic + checksum field, then hash the rest of the file.
	h := xxhash.New()
	if _, err := f.Seek(int64(len(FileSegmentMagic)+FileSegmentChecksumSize), io.SeekStart); err != nil {
		return nil, err
	}
	if _, err := io.Copy(h, f); err != nil {
		return nil, err
	}

	sum := make([]byte, FileSegmentChecksumSize)
	binary.BigEndian.PutUint64(sum, h.Sum64())
	return sum, nil
}
// VerifyFileSegment compares the calculated and stored checksum of the
// segment at path, returning ErrFileSegmentChecksumMismatch if they
// differ.
func VerifyFileSegment(path string) error {
	computed, err := ChecksumFileSegment(path)
	if err != nil {
		return err
	}

	// Open the segment to read the stored checksum from its header.
	s := NewFileSegment(filepath.Base(path), path)
	if err := s.Open(); err != nil {
		return err
	}
	defer s.Close()

	if !bytes.Equal(s.Checksum(), computed) {
		return ErrFileSegmentChecksumMismatch
	}
	return nil
}
// fileSegmentEncoderIndex represents a fixed-length RHH-based hash map.
// The map does not support insertion of duplicate keys. Slots hold file
// offsets of the keys; 0 marks an empty slot.
//
// https://cs.uwaterloo.ca/research/tr/1986/CS-86-14.pdf
type fileSegmentEncoderIndex struct {
	src   io.ReadSeeker // on-disk data; keys are read back by offset
	r     *bufio.Reader // buffered reader over src, reset per read
	mask  uint64        // capacity-1; capacity is a power of two
	elems []int64       // slot table of key offsets
}
// newFileSegmentEncoderIndex returns a new instance of
// fileSegmentEncoderIndex sized for n elements. Capacity is padded so
// the table runs at no more than ~90% load, rounded up to the next
// power of two.
func newFileSegmentEncoderIndex(src io.ReadSeeker, n int) *fileSegmentEncoderIndex {
	const loadFactor = 90
	capacity := pow2(uint64((n * 100) / loadFactor))
	return &fileSegmentEncoderIndex{
		src:   src,
		r:     bufio.NewReader(src),
		elems: make([]int64, capacity),
		mask:  uint64(capacity - 1),
	}
}
// WriteTo writes the index to w as a sequence of big-endian uint64 slot
// values. Implements io.WriterTo.
func (idx *fileSegmentEncoderIndex) WriteTo(w io.Writer) (n int64, err error) {
	var slot [8]byte
	for _, elem := range idx.elems {
		binary.BigEndian.PutUint64(slot[:], uint64(elem))
		nn, werr := w.Write(slot[:])
		n += int64(nn)
		if werr != nil {
			return n, werr
		}
	}
	return n, nil
}
// capacity returns the slot count of the index, as computed from the
// initial element count at construction.
func (idx *fileSegmentEncoderIndex) capacity() int {
	return len(idx.elems)
}
// insert writes the element at the given offset to the index using
// Robin Hood hashing: on a collision, the entry with the smaller probe
// distance keeps the slot and the other continues probing. Returns an
// error on a duplicate key or if a key cannot be read back from src.
func (idx *fileSegmentEncoderIndex) insert(offset int64) error {
	key, err := idx.readAt(offset)
	if err != nil {
		return err
	}

	pos := hashKey(key) & idx.mask
	capacity := uint64(len(idx.elems))

	var d uint64 // probe distance of the entry being placed
	for {
		// Exit if an empty slot exists (0 marks empty).
		if idx.elems[pos] == 0 {
			idx.elems[pos] = offset
			return nil
		}

		// Read key at current position.
		curr, err := idx.readAt(idx.elems[pos])
		if err != nil {
			return err
		}

		// Return an error if a duplicate key exists.
		if bytes.Equal(curr, key) {
			return errors.New("ethdb: duplicate key written to file segment")
		}

		// Swap if current element has a lower probe distance.
		tmp := dist(hashKey(curr), pos, capacity, idx.mask)
		if tmp < d {
			offset, idx.elems[pos], d = idx.elems[pos], offset, tmp
		}

		// Move position forward.
		pos = (pos + 1) & idx.mask
		d++
	}
}
// dist returns the probe distance of an entry with the given hash
// residing at slot i of a power-of-two table: the number of slots
// between the hash's home position (hash & mask) and i, wrapping around
// the table.
func dist(hash, i, capacity, mask uint64) uint64 {
	home := hash & mask
	return (i + capacity - home) & mask
}
// readAt reads the length-prefixed key stored at the given file offset.
func (idx *fileSegmentEncoderIndex) readAt(offset int64) ([]byte, error) {
	// Reset the buffered reader so stale buffered bytes are discarded
	// before seeking the underlying source.
	idx.r.Reset(idx.src)
	if _, err := idx.src.Seek(offset, io.SeekStart); err != nil {
		return nil, err
	}

	// Read the uvarint key length.
	keyLen, err := binary.ReadUvarint(idx.r)
	if err != nil {
		return nil, err
	}

	// Read the key bytes.
	key := make([]byte, keyLen)
	if _, err := io.ReadFull(idx.r, key); err != nil {
		return nil, err
	}
	return key, nil
}
// hashKey returns the xxhash of key, mapping a result of 0 to 1 so that
// 0 stays reserved as the "empty slot" marker in the index.
func hashKey(key []byte) uint64 {
	h := xxhash.Sum64(key)
	if h == 0 {
		h = 1
	}
	return h
}
// pow2 returns the smallest power of two that is greater than or equal
// to v, with a minimum of 2. Panics if v exceeds the largest power of
// two representable by the search (> 1<<61).
func pow2(v uint64) uint64 {
	p := uint64(2)
	for p < 1<<62 {
		if p >= v {
			return p
		}
		p *= 2
	}
	panic("unreachable")
}
// hexdump writes a hex/ASCII dump of b to stderr; debugging helper.
func hexdump(b []byte) { os.Stderr.Write([]byte(hex.Dump(b))) }