Commit cca0dfd
db: move TargetFileSize out of LevelOptions
`LevelOptions.TargetFileSize` is used in a way that is inconsistent with the rest of the level options. Whereas the other level options apply to the corresponding level (e.g. `Levels[1]` applies to `L1`), the target file size applies to the level relative to Lbase: for i > 0, `Levels[i].TargetFileSize` is the target file size of level Lbase+i-1. We move this field to its own array, `TargetFileSizes`, and add a convenience `TargetFileSize()` method. The options encoding is unchanged.
1 parent 9ba5661 · commit cca0dfd

13 files changed: +125 −99 lines
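
The convenience method itself lives in options.go, which is not among the hunks shown below. Going by the mapping described in the commit message and the call sites in compaction_picker.go, here is a minimal sketch of what `TargetFileSize()` plausibly looks like; the exact body is an assumption, not the committed code:

	// TargetFileSize returns the target file size for a compaction writing
	// into outputLevel, given the current base level. Index 0 of
	// TargetFileSizes applies to flushes (output to L0); otherwise index
	// 1+outputLevel-baseLevel applies, so entry i > 0 governs level
	// Lbase+i-1, matching the semantics in the commit message.
	func (o *Options) TargetFileSize(outputLevel, baseLevel int) int64 {
		if outputLevel == 0 {
			return o.TargetFileSizes[0]
		}
		return o.TargetFileSizes[1+outputLevel-baseLevel]
	}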

cmd/pebble/db.go
Lines changed: 1 addition & 1 deletion

@@ -103,7 +103,7 @@ func newPebbleDB(dir string) DB {
 		l.FilterType = pebble.TableFilter
 	}
 	opts.Levels[6].FilterPolicy = pebble.NoFilterPolicy
-	opts.FlushSplitBytes = opts.Levels[0].TargetFileSize
+	opts.FlushSplitBytes = opts.TargetFileSizes[0]

 	opts.EnsureDefaults()

cmd/pebble/replay_test.go
Lines changed: 6 additions & 4 deletions

@@ -49,17 +49,19 @@ func TestParseOptionsStr(t *testing.T) {
 		},
 		{
 			c: replayConfig{optionsString: `[Options] [Level "0"] target_file_size=222`},
-			options: &pebble.Options{Levels: [manifest.NumLevels]pebble.LevelOptions{
-				0: {TargetFileSize: 222},
-			}},
+			options: &pebble.Options{
+				TargetFileSizes: [manifest.NumLevels]int64{0: 222},
+			},
 		},
 		{
 			c: replayConfig{optionsString: `[Options] lbase_max_bytes=10 max_open_files=20 [Level "0"] target_file_size=30 [Level "1"] index_block_size=40`},
 			options: &pebble.Options{
 				LBaseMaxBytes: 10,
 				MaxOpenFiles:  20,
+				TargetFileSizes: [manifest.NumLevels]int64{
+					0: 30,
+				},
 				Levels: [manifest.NumLevels]pebble.LevelOptions{
-					0: {TargetFileSize: 30},
 					1: {IndexBlockSize: 40},
 				},
 			},

compaction.go
Lines changed: 10 additions & 8 deletions

@@ -46,8 +46,10 @@ var gcLabels = pprof.Labels("pebble", "gc")
 // expandedCompactionByteSizeLimit is the maximum number of bytes in all
 // compacted files. We avoid expanding the lower level file set of a compaction
 // if it would make the total compaction cover more than this many bytes.
-func expandedCompactionByteSizeLimit(opts *Options, level int, availBytes uint64) uint64 {
-	v := uint64(25 * opts.Levels[level].TargetFileSize)
+func expandedCompactionByteSizeLimit(
+	opts *Options, targetFileSize int64, availBytes uint64,
+) uint64 {
+	v := uint64(25 * targetFileSize)

 	// Never expand a compaction beyond half the available capacity, divided
 	// by the maximum number of concurrent compactions. Each of the concurrent

@@ -68,14 +70,14 @@ func expandedCompactionByteSizeLimit(opts *Options, level int, availBytes uint64
 // maxGrandparentOverlapBytes is the maximum bytes of overlap with level+1
 // before we stop building a single file in a level-1 to level compaction.
-func maxGrandparentOverlapBytes(opts *Options, level int) uint64 {
-	return uint64(10 * opts.Levels[level].TargetFileSize)
+func maxGrandparentOverlapBytes(targetFileSize int64) uint64 {
+	return uint64(10 * targetFileSize)
 }

 // maxReadCompactionBytes is used to prevent read compactions which
 // are too wide.
-func maxReadCompactionBytes(opts *Options, level int) uint64 {
-	return uint64(10 * opts.Levels[level].TargetFileSize)
+func maxReadCompactionBytes(targetFileSize int64) uint64 {
+	return uint64(10 * targetFileSize)
 }

 // noCloseIter wraps around a FragmentIterator, intercepting and eliding

@@ -774,8 +776,8 @@ func newFlush(
 	}

 	if opts.FlushSplitBytes > 0 {
-		c.maxOutputFileSize = uint64(opts.Levels[0].TargetFileSize)
-		c.maxOverlapBytes = maxGrandparentOverlapBytes(opts, 0)
+		c.maxOutputFileSize = uint64(opts.TargetFileSizes[0])
+		c.maxOverlapBytes = maxGrandparentOverlapBytes(opts.TargetFileSizes[0])
 		c.grandparents = c.version.Overlaps(baseLevel, c.bounds)
 		adjustGrandparentOverlapBytesForFlush(c, flushingBytes)
 	}

compaction_picker.go
Lines changed: 10 additions & 11 deletions

@@ -230,15 +230,15 @@ func newPickedCompaction(
 			startLevel, baseLevel))
 	}

-	adjustedLevel := adjustedOutputLevel(outputLevel, baseLevel)
+	targetFileSize := opts.TargetFileSize(outputLevel, baseLevel)
 	pc := &pickedCompaction{
 		version:     cur,
 		l0Organizer: l0Organizer,
 		baseLevel:   baseLevel,
 		inputs:      []compactionLevel{{level: startLevel}, {level: outputLevel}},
-		maxOutputFileSize:      uint64(opts.Levels[adjustedLevel].TargetFileSize),
-		maxOverlapBytes:        maxGrandparentOverlapBytes(opts, adjustedLevel),
-		maxReadCompactionBytes: maxReadCompactionBytes(opts, adjustedLevel),
+		maxOutputFileSize:      uint64(targetFileSize),
+		maxOverlapBytes:        maxGrandparentOverlapBytes(targetFileSize),
+		maxReadCompactionBytes: maxReadCompactionBytes(targetFileSize),
 	}
 	pc.startLevel = &pc.inputs[0]
 	pc.outputLevel = &pc.inputs[1]

@@ -382,9 +382,8 @@ func (pc *pickedCompaction) setupInputs(
 	// maxExpandedBytes is the maximum size of an expanded compaction. If
 	// growing a compaction results in a larger size, the original compaction
 	// is used instead.
-	maxExpandedBytes := expandedCompactionByteSizeLimit(
-		opts, adjustedOutputLevel(pc.outputLevel.level, pc.baseLevel), diskAvailBytes,
-	)
+	targetFileSize := opts.TargetFileSize(pc.outputLevel.level, pc.baseLevel)
+	maxExpandedBytes := expandedCompactionByteSizeLimit(opts, targetFileSize, diskAvailBytes)

 	// Grow the sstables in inputLevel.level as long as it doesn't affect the number
 	// of sstables included from pc.outputLevel.level.

@@ -1692,8 +1691,8 @@ func (pc *pickedCompaction) maybeAddLevel(opts *Options, diskAvailBytes uint64)
 	if !opts.Experimental.MultiLevelCompactionHeuristic.allowL0() && pc.startLevel.level == 0 {
 		return pc
 	}
-	if pc.estimatedInputSize() > expandedCompactionByteSizeLimit(
-		opts, adjustedOutputLevel(pc.outputLevel.level, pc.baseLevel), diskAvailBytes) {
+	targetFileSize := opts.TargetFileSize(pc.outputLevel.level, pc.baseLevel)
+	if pc.estimatedInputSize() > expandedCompactionByteSizeLimit(opts, targetFileSize, diskAvailBytes) {
 		// Don't add a level if the current compaction exceeds the compaction size limit
 		return pc
 	}

@@ -1783,8 +1782,8 @@ func (wa WriteAmpHeuristic) pick(
 	// We consider the addition of a level as an "expansion" of the compaction.
 	// If pcMulti is past the expanded compaction byte size limit already,
 	// we don't consider it.
-	if pcMulti.estimatedInputSize() >= expandedCompactionByteSizeLimit(
-		opts, adjustedOutputLevel(pcMulti.outputLevel.level, pcMulti.baseLevel), diskAvailBytes) {
+	targetFileSize := opts.TargetFileSize(pcMulti.outputLevel.level, pcMulti.baseLevel)
+	if pcMulti.estimatedInputSize() >= expandedCompactionByteSizeLimit(opts, targetFileSize, diskAvailBytes) {
 		return pcOrig
 	}
 	picked := pcOrig

compaction_test.go
Lines changed: 10 additions & 10 deletions

@@ -467,39 +467,39 @@ func TestPickCompaction(t *testing.T) {
 			1: {
 				newFileMeta(
 					200,
-					expandedCompactionByteSizeLimit(opts, 1, math.MaxUint64)-1,
+					expandedCompactionByteSizeLimit(opts, 4<<20, math.MaxUint64)-1,
 					base.ParseInternalKey("i1.SET.201"),
 					base.ParseInternalKey("i2.SET.202"),
 				),
 				newFileMeta(
 					210,
-					expandedCompactionByteSizeLimit(opts, 1, math.MaxUint64)-1,
+					expandedCompactionByteSizeLimit(opts, 4<<20, math.MaxUint64)-1,
 					base.ParseInternalKey("j1.SET.211"),
 					base.ParseInternalKey("j2.SET.212"),
 				),
 				newFileMeta(
 					220,
-					expandedCompactionByteSizeLimit(opts, 1, math.MaxUint64)-1,
+					expandedCompactionByteSizeLimit(opts, 4<<20, math.MaxUint64)-1,
 					base.ParseInternalKey("k1.SET.221"),
 					base.ParseInternalKey("k2.SET.222"),
 				),
 				newFileMeta(
 					230,
-					expandedCompactionByteSizeLimit(opts, 1, math.MaxUint64)-1,
+					expandedCompactionByteSizeLimit(opts, 4<<20, math.MaxUint64)-1,
 					base.ParseInternalKey("l1.SET.231"),
 					base.ParseInternalKey("l2.SET.232"),
 				),
 			},
 			2: {
 				newFileMeta(
 					300,
-					expandedCompactionByteSizeLimit(opts, 2, math.MaxUint64)-1,
+					expandedCompactionByteSizeLimit(opts, 8<<20, math.MaxUint64)-1,
 					base.ParseInternalKey("a0.SET.301"),
 					base.ParseInternalKey("l0.SET.302"),
 				),
 				newFileMeta(
 					310,
-					expandedCompactionByteSizeLimit(opts, 2, math.MaxUint64)-1,
+					expandedCompactionByteSizeLimit(opts, 8<<20, math.MaxUint64)-1,
 					base.ParseInternalKey("l2.SET.311"),
 					base.ParseInternalKey("z2.SET.312"),
 				),

@@ -2229,8 +2229,8 @@ func TestCompactionErrorCleanup(t *testing.T) {
 		},
 	}
 	opts.WithFSDefaults()
-	for i := range opts.Levels {
-		opts.Levels[i].TargetFileSize = 1
+	for i := range opts.TargetFileSizes {
+		opts.TargetFileSizes[i] = 1
 	}
 	opts.testingRandomized(t)
 	d, err := Open("", opts)

@@ -2932,8 +2932,8 @@ func TestCompactionErrorStats(t *testing.T) {
 		},
 	}
 	opts.WithFSDefaults()
-	for i := range opts.Levels {
-		opts.Levels[i].TargetFileSize = 1
+	for i := range opts.TargetFileSizes {
+		opts.TargetFileSizes[i] = 1
 	}
 	opts.testingRandomized(t)
 	d, err := Open("", opts)
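
A note on the literals in the TestPickCompaction hunk above: the old calls passed adjusted output levels 1 and 2, while the new calls pass the target file sizes those levels resolve to under the default options. A self-contained sketch of the implied arithmetic, assuming a 2<<20 default at index 0 with per-index doubling (the doubling also appears in the data_test.go hunk below; the 2 MB base is an assumption, since options.go is not shown here):

	package main

	import "fmt"

	func main() {
		// Hypothetical reconstruction of the default target file sizes
		// implied by the test constants: assume 2<<20 at index 0 and
		// double at each subsequent index.
		var sizes [7]int64
		sizes[0] = 2 << 20 // assumed default
		for i := 1; i < len(sizes); i++ {
			sizes[i] = 2 * sizes[i-1]
		}
		fmt.Println(sizes[1] == 4<<20, sizes[2] == 8<<20) // true true
	}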

data_test.go
Lines changed: 3 additions & 3 deletions

@@ -1779,13 +1779,13 @@ func parseDBOptionsArgs(opts *Options, args []datadriven.CmdArg) error {
 			if err != nil {
 				return err
 			}
-			opts.Levels[i].TargetFileSize = size
+			opts.TargetFileSizes[i] = size
 		}
 		// Set the remaining file sizes. Normally, EnsureDefaults() would do that
 		// for us but it was already called and the target file sizes for all
 		// levels are now set to the defaults.
-		for i := len(cmdArg.Vals); i < len(opts.Levels); i++ {
-			opts.Levels[i].TargetFileSize = opts.Levels[i-1].TargetFileSize * 2
+		for i := len(cmdArg.Vals); i < len(opts.TargetFileSizes); i++ {
+			opts.TargetFileSizes[i] = opts.TargetFileSizes[i-1] * 2
 		}
 	case "value-separation":
 		if len(cmdArg.Vals) != 3 {

iterator_test.go
Lines changed: 12 additions & 10 deletions

@@ -2098,25 +2098,25 @@ func TestRangeKeyMaskingRandomized(t *testing.T) {
 	// Define a simple base testOpts, and a randomized testOpts. The results
 	// of iteration will be compared.
 	type testOpts struct {
-		levelOpts []LevelOptions
-		filter    func() BlockPropertyFilterMask
+		levelOpts       []LevelOptions
+		targetFileSizes []int64
+		filter          func() BlockPropertyFilterMask
 	}

 	baseOpts := testOpts{
-		levelOpts: make([]LevelOptions, 7),
+		levelOpts:       make([]LevelOptions, 7),
+		targetFileSizes: make([]int64, 7),
 	}
 	for i := 0; i < len(baseOpts.levelOpts); i++ {
-		baseOpts.levelOpts[i].TargetFileSize = 1
+		baseOpts.targetFileSizes[i] = 1
 		baseOpts.levelOpts[i].BlockSize = 1
 	}

 	randomOpts := testOpts{
 		levelOpts: []LevelOptions{
-			{
-				TargetFileSize: int64(1 + rng.IntN(2<<20)), // Vary the L0 file size.
-				BlockSize:      1 + rng.IntN(32<<10),
-			},
+			{BlockSize: 1 + rng.IntN(32<<10)},
 		},
+		targetFileSizes: []int64{int64(1 + rng.IntN(2<<20))}, // Vary the L0 file size.
 	}
 	if rng.IntN(2) == 0 {
 		randomOpts.filter = func() BlockPropertyFilterMask {

@@ -2135,6 +2135,7 @@ func TestRangeKeyMaskingRandomized(t *testing.T) {
 			sstable.NewTestKeysBlockPropertyCollector,
 		},
 	}
+	copy(opts1.TargetFileSizes[:], baseOpts.targetFileSizes)
 	copy(opts1.Levels[:], baseOpts.levelOpts)
 	d1, err := Open("", opts1)
 	require.NoError(t, err)

@@ -2148,6 +2149,7 @@ func TestRangeKeyMaskingRandomized(t *testing.T) {
 			sstable.NewTestKeysBlockPropertyCollector,
 		},
 	}
+	copy(opts2.TargetFileSizes[:], randomOpts.targetFileSizes)
 	copy(opts2.Levels[:], randomOpts.levelOpts)
 	d2, err := Open("", opts2)
 	require.NoError(t, err)

@@ -2689,8 +2691,8 @@ func buildFragmentedRangeKey(b testing.TB, seed uint64) (d *DB, keys [][]byte) {
 		L0CompactionFileThreshold: 1,
 	}
 	opts.EnsureDefaults()
-	for l := 0; l < len(opts.Levels); l++ {
-		opts.Levels[l].TargetFileSize = 1
+	for l := range opts.TargetFileSizes {
+		opts.TargetFileSizes[l] = 1
 	}
 	var err error
 	d, err = Open("", opts)

metamorphic/options.go
Lines changed: 9 additions & 8 deletions

@@ -770,21 +770,22 @@ func RandomOptions(
 		opts.AllocatorSizeClasses = pebble.JemallocSizeClasses
 	}

-	var lopts pebble.LevelOptions
-	lopts.BlockRestartInterval = 1 + rng.IntN(64)  // 1 - 64
-	lopts.BlockSize = 1 << uint(rng.IntN(24))      // 1 - 16MB
-	lopts.BlockSizeThreshold = 50 + rng.IntN(50)   // 50 - 100
-	lopts.IndexBlockSize = 1 << uint(rng.IntN(24)) // 1 - 16MB
-	lopts.TargetFileSize = 1 << uint(rng.IntN(28)) // 1 - 256MB
-	if lopts.TargetFileSize < 1<<12 {
+	opts.TargetFileSizes[0] = 1 << uint(rng.IntN(28)) // 1 - 256MB
+	if opts.TargetFileSizes[0] < 1<<12 {
 		// We will generate a lot of files, which will slow down compactions.
 		// Increase L0StopWritesThreshold to reduce the number of write stalls
 		// that could cause operation timeouts.
 		opts.L0StopWritesThreshold = 100
 	}
 	// The EstimatedSize of an empty table writer is 8 bytes. We want something a
 	// little bigger than that as the minimum target.
-	lopts.TargetFileSize = max(lopts.TargetFileSize, 12)
+	opts.TargetFileSizes[0] = max(opts.TargetFileSizes[0], 12)
+
+	var lopts pebble.LevelOptions
+	lopts.BlockRestartInterval = 1 + rng.IntN(64)  // 1 - 64
+	lopts.BlockSize = 1 << uint(rng.IntN(24))      // 1 - 16MB
+	lopts.BlockSizeThreshold = 50 + rng.IntN(50)   // 50 - 100
+	lopts.IndexBlockSize = 1 << uint(rng.IntN(24)) // 1 - 16MB

 	// We either use no bloom filter, the default filter, or a filter with
 	// randomized bits-per-key setting. We zero out the Filters map. It'll get

metrics_test.go
Lines changed: 1 addition & 1 deletion

@@ -178,7 +178,7 @@ func TestMetrics(t *testing.T) {
 			MaxBlobReferenceDepth: 5,
 		}
 	}
-	opts.Levels[0] = LevelOptions{TargetFileSize: 50}
+	opts.TargetFileSizes[0] = 50

 	// Prevent foreground flushes and compactions from triggering asynchronous
 	// follow-up compactions. This avoids asynchronously-scheduled work from
