Skip to content

Commit 243d3ba

Browse files
committed
manifest: remove redundant smallest, largest bounds in TableMetadata
TableMetadata already tracks the `boundType` of the smallest & largest keys, so the explicit `Smallest`/`Largest` fields are redundant. Removing them saves us 64 bytes (32 bytes for each InternalKey) per TableMetadata. Informs: #2047
1 parent decdc6f commit 243d3ba

31 files changed

+342
-293
lines changed

compaction.go

Lines changed: 16 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -683,10 +683,10 @@ func (c *compaction) errorOnUserKeyOverlap(ve *versionEdit) error {
683683
if n := len(ve.NewTables); n > 1 {
684684
meta := ve.NewTables[n-1].Meta
685685
prevMeta := ve.NewTables[n-2].Meta
686-
if !prevMeta.Largest.IsExclusiveSentinel() &&
687-
c.cmp(prevMeta.Largest.UserKey, meta.Smallest.UserKey) >= 0 {
686+
if !prevMeta.Largest().IsExclusiveSentinel() &&
687+
c.cmp(prevMeta.Largest().UserKey, meta.Smallest().UserKey) >= 0 {
688688
return errors.Errorf("pebble: compaction split user key across two sstables: %s in %s and %s",
689-
prevMeta.Largest.Pretty(c.formatKey),
689+
prevMeta.Largest().Pretty(c.formatKey),
690690
prevMeta.FileNum,
691691
meta.FileNum)
692692
}
@@ -1000,7 +1000,7 @@ func (c *compaction) String() string {
10001000
i := level - c.startLevel.level
10011001
fmt.Fprintf(&buf, "%d:", level)
10021002
for f := range c.inputs[i].files.All() {
1003-
fmt.Fprintf(&buf, " %s:%s-%s", f.FileNum, f.Smallest, f.Largest)
1003+
fmt.Fprintf(&buf, " %s:%s-%s", f.FileNum, f.Smallest(), f.Largest())
10041004
}
10051005
fmt.Fprintf(&buf, "\n")
10061006
}
@@ -1319,8 +1319,8 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
13191319

13201320
// This file fits perfectly within the excise span, so we can slot it at L6.
13211321
if ingestFlushable.exciseSpan.Valid() &&
1322-
ingestFlushable.exciseSpan.Contains(d.cmp, file.Smallest) &&
1323-
ingestFlushable.exciseSpan.Contains(d.cmp, file.Largest) {
1322+
ingestFlushable.exciseSpan.Contains(d.cmp, file.Smallest()) &&
1323+
ingestFlushable.exciseSpan.Contains(d.cmp, file.Largest()) {
13241324
level = 6
13251325
} else {
13261326
// TODO(radu): this can perform I/O; we should not do this while holding DB.mu.
@@ -2146,7 +2146,7 @@ func (h *deleteCompactionHint) canDeleteOrExcise(
21462146
default:
21472147
panic(fmt.Sprintf("pebble: unknown delete compaction hint type: %d", h.hintType))
21482148
}
2149-
if cmp(h.start, m.Smallest.UserKey) <= 0 &&
2149+
if cmp(h.start, m.Smallest().UserKey) <= 0 &&
21502150
base.UserKeyExclusive(h.end).CompareUpperBounds(cmp, m.UserKeyBounds().End) >= 0 {
21512151
return hintDeletesFile
21522152
}
@@ -2157,7 +2157,7 @@ func (h *deleteCompactionHint) canDeleteOrExcise(
21572157
}
21582158
// Check for any overlap. In cases of partial overlap, we can excise the part of the file
21592159
// that overlaps with the deletion hint.
2160-
if cmp(h.end, m.Smallest.UserKey) > 0 &&
2160+
if cmp(h.end, m.Smallest().UserKey) > 0 &&
21612161
(m.UserKeyBounds().End.CompareUpperBounds(cmp, base.UserKeyInclusive(h.start)) >= 0) {
21622162
return hintExcisesFile
21632163
}
@@ -2260,7 +2260,7 @@ func checkDeleteCompactionHints(
22602260
// leaves a fragment of the file on the left, decrement
22612261
// the counter once. If the hint leaves a fragment of the
22622262
// file on the right, decrement the counter once.
2263-
if cmp(h.start, m.Smallest.UserKey) > 0 {
2263+
if cmp(h.start, m.Smallest().UserKey) > 0 {
22642264
filesDeletedByCurrentHint--
22652265
}
22662266
if m.UserKeyBounds().End.IsUpperBoundFor(cmp, h.end) {
@@ -2660,7 +2660,7 @@ func (d *DB) runCopyCompaction(
26602660
}
26612661
deleteOnExit = true
26622662

2663-
start, end := newMeta.Smallest, newMeta.Largest
2663+
start, end := newMeta.Smallest(), newMeta.Largest()
26642664
if newMeta.SyntheticPrefixAndSuffix.HasPrefix() {
26652665
syntheticPrefix := newMeta.SyntheticPrefixAndSuffix.Prefix()
26662666
start.UserKey = syntheticPrefix.Invert(start.UserKey)
@@ -2797,7 +2797,7 @@ func (d *DB) runDeleteOnlyCompactionForLevel(
27972797
// it maps to a virtual file that replaces f, or nil if f got removed
27982798
// in its entirety.
27992799
curFile := f
2800-
for curFragment < len(fragments) && d.cmp(fragments[curFragment].start, f.Smallest.UserKey) <= 0 {
2800+
for curFragment < len(fragments) && d.cmp(fragments[curFragment].start, f.Smallest().UserKey) <= 0 {
28012801
curFragment++
28022802
}
28032803
if curFragment > 0 {
@@ -3284,7 +3284,7 @@ func (c *compaction) makeVersionEdit(result compact.Result) (*versionEdit, error
32843284

32853285
// Sanity check that the tables are ordered and don't overlap.
32863286
for i := 1; i < len(ve.NewTables); i++ {
3287-
if ve.NewTables[i-1].Meta.UserKeyBounds().End.IsUpperBoundFor(c.cmp, ve.NewTables[i].Meta.Smallest.UserKey) {
3287+
if ve.NewTables[i-1].Meta.UserKeyBounds().End.IsUpperBoundFor(c.cmp, ve.NewTables[i].Meta.Smallest().UserKey) {
32883288
return nil, base.AssertionFailedf("pebble: compaction output tables overlap: %s and %s",
32893289
ve.NewTables[i-1].Meta.DebugString(c.formatKey, true),
32903290
ve.NewTables[i].Meta.DebugString(c.formatKey, true),
@@ -3399,11 +3399,11 @@ func validateVersionEdit(
33993399

34003400
// Validate both new and deleted files.
34013401
for _, f := range ve.NewTables {
3402-
validateKey(f.Meta, f.Meta.Smallest.UserKey)
3403-
validateKey(f.Meta, f.Meta.Largest.UserKey)
3402+
validateKey(f.Meta, f.Meta.Smallest().UserKey)
3403+
validateKey(f.Meta, f.Meta.Largest().UserKey)
34043404
}
34053405
for _, m := range ve.DeletedTables {
3406-
validateKey(m, m.Smallest.UserKey)
3407-
validateKey(m, m.Largest.UserKey)
3406+
validateKey(m, m.Smallest().UserKey)
3407+
validateKey(m, m.Largest().UserKey)
34083408
}
34093409
}

compaction_picker.go

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -475,21 +475,21 @@ func (pc *pickedCompaction) setupInputs(
475475
if pc.outputLevel.files.Empty() {
476476
baseIter := pc.version.Levels[pc.outputLevel.level].Iter()
477477
if sm := baseIter.SeekLT(pc.cmp, pc.smallest.UserKey); sm != nil {
478-
smallestBaseKey = sm.Largest
478+
smallestBaseKey = sm.Largest()
479479
}
480480
if la := baseIter.SeekGE(pc.cmp, pc.largest.UserKey); la != nil {
481-
largestBaseKey = la.Smallest
481+
largestBaseKey = la.Smallest()
482482
}
483483
} else {
484484
// NB: We use Reslice to access the underlying level's files, but
485485
// we discard the returned slice. The pc.outputLevel.files slice
486486
// is not modified.
487487
_ = pc.outputLevel.files.Reslice(func(start, end *manifest.LevelIterator) {
488488
if sm := start.Prev(); sm != nil {
489-
smallestBaseKey = sm.Largest
489+
smallestBaseKey = sm.Largest()
490490
}
491491
if la := end.Next(); la != nil {
492-
largestBaseKey = la.Smallest
492+
largestBaseKey = la.Smallest()
493493
}
494494
})
495495
}
@@ -1096,12 +1096,12 @@ func pickCompactionSeedFile(
10961096
}
10971097

10981098
// Trim any output-level files smaller than f.
1099-
for outputFile != nil && sstableKeyCompare(cmp, outputFile.Largest, f.Smallest) < 0 {
1099+
for outputFile != nil && sstableKeyCompare(cmp, outputFile.Largest(), f.Smallest()) < 0 {
11001100
outputFile = outputIter.Next()
11011101
}
11021102

11031103
skip := false
1104-
for outputFile != nil && sstableKeyCompare(cmp, outputFile.Smallest, f.Largest) <= 0 {
1104+
for outputFile != nil && sstableKeyCompare(cmp, outputFile.Smallest(), f.Largest()) <= 0 {
11051105
overlappingBytes += outputFile.Size
11061106
if outputFile.IsCompacting() {
11071107
// If one of the overlapping files is compacting, we're not going to be
@@ -1155,7 +1155,7 @@ func pickCompactionSeedFile(
11551155
// nothing in the second level and is cheap to compact, when in
11561156
// reality we'd need to expand the compaction to include all 5
11571157
// files.
1158-
if sstableKeyCompare(cmp, outputFile.Largest, f.Largest) > 0 {
1158+
if sstableKeyCompare(cmp, outputFile.Largest(), f.Largest()) > 0 {
11591159
break
11601160
}
11611161
outputFile = outputIter.Next()
@@ -2074,8 +2074,8 @@ func conflictsWithInProgress(
20742074
continue
20752075
}
20762076
iter := in.files.Iter()
2077-
smallest := iter.First().Smallest.UserKey
2078-
largest := iter.Last().Largest.UserKey
2077+
smallest := iter.First().Smallest().UserKey
2078+
largest := iter.Last().Largest().UserKey
20792079
if (in.level == manual.level || in.level == outputLevel) &&
20802080
isUserKeysOverlapping(manual.start, manual.end, smallest, largest, cmp) {
20812081
return true

compaction_picker_test.go

Lines changed: 23 additions & 23 deletions
Original file line numberDiff line numberDiff line change
@@ -315,8 +315,8 @@ func TestCompactionPickerTargetLevel(t *testing.T) {
315315
if c.inputs[0].level == 0 {
316316
iter := c.inputs[0].files.Iter()
317317
l0InProgress = append(l0InProgress, manifest.L0Compaction{
318-
Smallest: iter.First().Smallest,
319-
Largest: iter.Last().Largest,
318+
Smallest: iter.First().Smallest(),
319+
Largest: iter.Last().Largest(),
320320
IsIntraL0: c.outputLevel == 0,
321321
})
322322
}
@@ -426,8 +426,8 @@ func TestCompactionPickerL0(t *testing.T) {
426426
base.ParseInternalKey(strings.TrimSpace(parts[0])),
427427
base.ParseInternalKey(strings.TrimSpace(parts[1])),
428428
)
429-
m.SmallestSeqNum = m.Smallest.SeqNum()
430-
m.LargestSeqNum = m.Largest.SeqNum()
429+
m.SmallestSeqNum = m.Smallest().SeqNum()
430+
m.LargestSeqNum = m.Largest().SeqNum()
431431
if m.SmallestSeqNum > m.LargestSeqNum {
432432
m.SmallestSeqNum, m.LargestSeqNum = m.LargestSeqNum, m.SmallestSeqNum
433433
}
@@ -517,11 +517,11 @@ func TestCompactionPickerL0(t *testing.T) {
517517
return fmt.Sprintf("cannot find compaction file %s", base.FileNum(fileNum))
518518
}
519519
compactFile.CompactionState = manifest.CompactionStateCompacting
520-
if first || base.InternalCompare(DefaultComparer.Compare, info.largest, compactFile.Largest) < 0 {
521-
info.largest = compactFile.Largest
520+
if first || base.InternalCompare(DefaultComparer.Compare, info.largest, compactFile.Largest()) < 0 {
521+
info.largest = compactFile.Largest()
522522
}
523-
if first || base.InternalCompare(DefaultComparer.Compare, info.smallest, compactFile.Smallest) > 0 {
524-
info.smallest = compactFile.Smallest
523+
if first || base.InternalCompare(DefaultComparer.Compare, info.smallest, compactFile.Smallest()) > 0 {
524+
info.smallest = compactFile.Smallest()
525525
}
526526
first = false
527527
compactionFiles[level] = append(compactionFiles[level], compactFile)
@@ -658,9 +658,9 @@ func TestCompactionPickerConcurrency(t *testing.T) {
658658
m.Size = uint64(v)
659659
}
660660
}
661-
m.SmallestSeqNum = m.Smallest.SeqNum()
662-
m.LargestSeqNum = m.Largest.SeqNum()
663-
m.LargestSeqNumAbsolute = m.Largest.SeqNum()
661+
m.SmallestSeqNum = m.Smallest().SeqNum()
662+
m.LargestSeqNum = m.Largest().SeqNum()
663+
m.LargestSeqNumAbsolute = m.Largest().SeqNum()
664664
return m, nil
665665
}
666666

@@ -740,11 +740,11 @@ func TestCompactionPickerConcurrency(t *testing.T) {
740740
return fmt.Sprintf("cannot find compaction file %s", base.FileNum(fileNum))
741741
}
742742
compactFile.CompactionState = manifest.CompactionStateCompacting
743-
if first || base.InternalCompare(DefaultComparer.Compare, info.largest, compactFile.Largest) < 0 {
744-
info.largest = compactFile.Largest
743+
if first || base.InternalCompare(DefaultComparer.Compare, info.largest, compactFile.Largest()) < 0 {
744+
info.largest = compactFile.Largest()
745745
}
746-
if first || base.InternalCompare(DefaultComparer.Compare, info.smallest, compactFile.Smallest) > 0 {
747-
info.smallest = compactFile.Smallest
746+
if first || base.InternalCompare(DefaultComparer.Compare, info.smallest, compactFile.Smallest()) > 0 {
747+
info.smallest = compactFile.Smallest()
748748
}
749749
first = false
750750
compactionFiles[level] = append(compactionFiles[level], compactFile)
@@ -861,9 +861,9 @@ func TestCompactionPickerPickReadTriggered(t *testing.T) {
861861
m.Size = uint64(v)
862862
}
863863
}
864-
m.SmallestSeqNum = m.Smallest.SeqNum()
865-
m.LargestSeqNum = m.Largest.SeqNum()
866-
m.LargestSeqNumAbsolute = m.Largest.SeqNum()
864+
m.SmallestSeqNum = m.Smallest().SeqNum()
865+
m.LargestSeqNum = m.Largest().SeqNum()
866+
m.LargestSeqNumAbsolute = m.Largest().SeqNum()
867867
return m, nil
868868
}
869869

@@ -1017,8 +1017,8 @@ func TestPickedCompactionSetupInputs(t *testing.T) {
10171017
base.ParseInternalKey(strings.TrimSpace(tableParts[0])),
10181018
base.ParseInternalKey(strings.TrimSpace(tableParts[1])),
10191019
)
1020-
m.SmallestSeqNum = m.Smallest.SeqNum()
1021-
m.LargestSeqNum = m.Largest.SeqNum()
1020+
m.SmallestSeqNum = m.Smallest().SeqNum()
1021+
m.LargestSeqNum = m.Largest().SeqNum()
10221022
if m.SmallestSeqNum > m.LargestSeqNum {
10231023
m.SmallestSeqNum, m.LargestSeqNum = m.LargestSeqNum, m.SmallestSeqNum
10241024
}
@@ -1232,7 +1232,7 @@ func TestPickedCompactionExpandInputs(t *testing.T) {
12321232

12331233
var buf bytes.Buffer
12341234
for f := range iter.Take().Slice().All() {
1235-
fmt.Fprintf(&buf, "%d: %s-%s\n", f.FileNum, f.Smallest, f.Largest)
1235+
fmt.Fprintf(&buf, "%d: %s-%s\n", f.FileNum, f.Smallest(), f.Largest())
12361236
}
12371237
return buf.String()
12381238

@@ -1284,8 +1284,8 @@ func TestCompactionOutputFileSize(t *testing.T) {
12841284
m.StatsMarkValid()
12851285
}
12861286
}
1287-
m.SmallestSeqNum = m.Smallest.SeqNum()
1288-
m.LargestSeqNum = m.Largest.SeqNum()
1287+
m.SmallestSeqNum = m.Smallest().SeqNum()
1288+
m.LargestSeqNum = m.Largest().SeqNum()
12891289
m.LargestSeqNumAbsolute = m.LargestSeqNum
12901290
return m, nil
12911291
}

compaction_test.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2014,8 +2014,8 @@ func TestCompactionErrorOnUserKeyOverlap(t *testing.T) {
20142014
base.ParseInternalKey(strings.TrimSpace(parts[0])),
20152015
base.ParseInternalKey(strings.TrimSpace(parts[1])),
20162016
)
2017-
m.SmallestSeqNum = m.Smallest.SeqNum()
2018-
m.LargestSeqNum = m.Largest.SeqNum()
2017+
m.SmallestSeqNum = m.Smallest().SeqNum()
2018+
m.LargestSeqNum = m.Largest().SeqNum()
20192019
m.LargestSeqNumAbsolute = m.LargestSeqNum
20202020
m.InitPhysicalBacking()
20212021
return m
@@ -2146,8 +2146,8 @@ func TestCompactionCheckOrdering(t *testing.T) {
21462146
base.ParseInternalKey(strings.TrimSpace(parts[0])),
21472147
base.ParseInternalKey(strings.TrimSpace(parts[1])),
21482148
)
2149-
m.SmallestSeqNum = m.Smallest.SeqNum()
2150-
m.LargestSeqNum = m.Largest.SeqNum()
2149+
m.SmallestSeqNum = m.Smallest().SeqNum()
2150+
m.LargestSeqNum = m.Largest().SeqNum()
21512151
m.LargestSeqNumAbsolute = m.LargestSeqNum
21522152
m.InitPhysicalBacking()
21532153
return m

data_test.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -944,11 +944,11 @@ func runDBDefineCmdReuseFS(td *datadriven.TestData, opts *Options) (*DB, error)
944944
for _, f := range newVE.NewTables {
945945
if start != nil {
946946
f.Meta.SmallestPointKey = *start
947-
f.Meta.Smallest = *start
947+
f.Meta.ExtendPointKeyBounds(DefaultComparer.Compare, *start, *start)
948948
}
949949
if end != nil {
950950
f.Meta.LargestPointKey = *end
951-
f.Meta.Largest = *end
951+
f.Meta.ExtendPointKeyBounds(DefaultComparer.Compare, *end, *end)
952952
}
953953
if blobDepth > 0 {
954954
f.Meta.BlobReferenceDepth = blobDepth
@@ -999,8 +999,8 @@ func runDBDefineCmdReuseFS(td *datadriven.TestData, opts *Options) (*DB, error)
999999
}
10001000
c := &compaction{
10011001
inputs: []compactionLevel{{}, {level: outputLevel}},
1002-
smallest: m.Smallest,
1003-
largest: m.Largest,
1002+
smallest: m.Smallest(),
1003+
largest: m.Largest(),
10041004
}
10051005
c.startLevel, c.outputLevel = &c.inputs[0], &c.inputs[1]
10061006
return c, nil

download.go

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -252,8 +252,8 @@ func (d *DB) newDownloadSpanTask(vers *version, sp DownloadSpan) (_ *downloadSpa
252252
iter := ls.Iter()
253253
if f := iter.SeekGE(d.cmp, sp.StartKey); f != nil &&
254254
objstorage.IsExternalTable(d.objProvider, f.FileBacking.DiskFileNum) &&
255-
d.cmp(f.Smallest.UserKey, bounds.Start) < 0 {
256-
bounds.Start = f.Smallest.UserKey
255+
d.cmp(f.Smallest().UserKey, bounds.Start) < 0 {
256+
bounds.Start = f.Smallest().UserKey
257257
}
258258
}
259259
startCursor := downloadCursor{
@@ -313,7 +313,7 @@ func (c downloadCursor) String() string {
313313
func makeCursorAtFile(f *tableMetadata, level int) downloadCursor {
314314
return downloadCursor{
315315
level: level,
316-
key: f.Smallest.UserKey,
316+
key: f.Smallest().UserKey,
317317
seqNum: f.LargestSeqNum,
318318
}
319319
}
@@ -323,7 +323,7 @@ func makeCursorAtFile(f *tableMetadata, level int) downloadCursor {
323323
func makeCursorAfterFile(f *tableMetadata, level int) downloadCursor {
324324
return downloadCursor{
325325
level: level,
326-
key: f.Smallest.UserKey,
326+
key: f.Smallest().UserKey,
327327
seqNum: f.LargestSeqNum + 1,
328328
}
329329
}
@@ -380,7 +380,7 @@ func (c downloadCursor) NextExternalFileOnLevel(
380380
firstCursor = c
381381
}
382382
// Trim the end bound as an optimization.
383-
endBound = base.UserKeyInclusive(f.Smallest.UserKey)
383+
endBound = base.UserKeyInclusive(f.Smallest().UserKey)
384384
}
385385
}
386386
return first
@@ -402,7 +402,7 @@ func firstExternalFileInLevelIter(
402402
for f != nil && !cursor.FileIsAfterCursor(cmp, f, cursor.level) {
403403
f = it.Next()
404404
}
405-
for ; f != nil && endBound.IsUpperBoundFor(cmp, f.Smallest.UserKey); f = it.Next() {
405+
for ; f != nil && endBound.IsUpperBoundFor(cmp, f.Smallest().UserKey); f = it.Next() {
406406
if f.Virtual && objstorage.IsExternalTable(objProvider, f.FileBacking.DiskFileNum) {
407407
return f
408408
}
@@ -532,7 +532,7 @@ func (d *DB) tryLaunchDownloadCompaction(
532532

533533
download.bookmarks = append(download.bookmarks, downloadBookmark{
534534
start: makeCursorAtFile(f, level),
535-
endBound: base.UserKeyInclusive(f.Largest.UserKey),
535+
endBound: base.UserKeyInclusive(f.Largest().UserKey),
536536
})
537537
doneCh, ok := d.tryLaunchDownloadForFile(vers, l0Organizer, env, download, level, f)
538538
if ok {

0 commit comments

Comments
 (0)