Skip to content

Commit 2874a42

Browse files
committed
db: clean up level options
The `Options.Level` field has questionable semantics. It's a slice of arbitrary length and if not all levels are specified, the settings for missing levels are derived at runtime based on the last specified level (with target file size doubling for each level). But if we add a level to the slice, that level gets the L0 defaults (including the target file size). This commit cleans this up: the field is now an array of size `NumLevels` and the defaults are always calculated in the same way: hardcoded defaults for L0, and for all other levels we take the options from the level above and double the file size.
1 parent d6b8e14 commit 2874a42

22 files changed

+304
-137
lines changed

cmd/pebble/db.go

Lines changed: 0 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -65,7 +65,6 @@ func newPebbleDB(dir string) DB {
6565
L0CompactionThreshold: 2,
6666
L0StopWritesThreshold: 1000,
6767
LBaseMaxBytes: 64 << 20, // 64 MB
68-
Levels: make([]pebble.LevelOptions, 7),
6968
MaxOpenFiles: 16384,
7069
MemTableSize: 64 << 20,
7170
MemTableStopWritesThreshold: 4,
@@ -100,10 +99,6 @@ func newPebbleDB(dir string) DB {
10099
l.IndexBlockSize = 256 << 10 // 256 KB
101100
l.FilterPolicy = bloom.FilterPolicy(10)
102101
l.FilterType = pebble.TableFilter
103-
if i > 0 {
104-
l.TargetFileSize = opts.Levels[i-1].TargetFileSize * 2
105-
}
106-
l.EnsureDefaults()
107102
}
108103
opts.Levels[6].FilterPolicy = pebble.NoFilterPolicy
109104
opts.FlushSplitBytes = opts.Levels[0].TargetFileSize

cmd/pebble/replay_test.go

Lines changed: 6 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,7 @@ import (
99
"testing"
1010

1111
"github.com/cockroachdb/pebble"
12+
"github.com/cockroachdb/pebble/internal/manifest"
1213
"github.com/stretchr/testify/require"
1314
)
1415

@@ -48,18 +49,18 @@ func TestParseOptionsStr(t *testing.T) {
4849
},
4950
{
5051
c: replayConfig{optionsString: `[Options] [Level "0"] target_file_size=222`},
51-
options: &pebble.Options{Levels: []pebble.LevelOptions{
52-
{TargetFileSize: 222},
52+
options: &pebble.Options{Levels: [manifest.NumLevels]pebble.LevelOptions{
53+
0: {TargetFileSize: 222},
5354
}},
5455
},
5556
{
5657
c: replayConfig{optionsString: `[Options] lbase_max_bytes=10 max_open_files=20 [Level "0"] target_file_size=30 [Level "1"] index_block_size=40`},
5758
options: &pebble.Options{
5859
LBaseMaxBytes: 10,
5960
MaxOpenFiles: 20,
60-
Levels: []pebble.LevelOptions{
61-
{TargetFileSize: 30},
62-
{IndexBlockSize: 40},
61+
Levels: [manifest.NumLevels]pebble.LevelOptions{
62+
0: {TargetFileSize: 30},
63+
1: {IndexBlockSize: 40},
6364
},
6465
},
6566
},

compaction.go

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -47,7 +47,7 @@ var gcLabels = pprof.Labels("pebble", "gc")
4747
// compacted files. We avoid expanding the lower level file set of a compaction
4848
// if it would make the total compaction cover more than this many bytes.
4949
func expandedCompactionByteSizeLimit(opts *Options, level int, availBytes uint64) uint64 {
50-
v := uint64(25 * opts.Level(level).TargetFileSize)
50+
v := uint64(25 * opts.Levels[level].TargetFileSize)
5151

5252
// Never expand a compaction beyond half the available capacity, divided
5353
// by the maximum number of concurrent compactions. Each of the concurrent
@@ -69,13 +69,13 @@ func expandedCompactionByteSizeLimit(opts *Options, level int, availBytes uint64
6969
// maxGrandparentOverlapBytes is the maximum bytes of overlap with level+1
7070
// before we stop building a single file in a level-1 to level compaction.
7171
func maxGrandparentOverlapBytes(opts *Options, level int) uint64 {
72-
return uint64(10 * opts.Level(level).TargetFileSize)
72+
return uint64(10 * opts.Levels[level].TargetFileSize)
7373
}
7474

7575
// maxReadCompactionBytes is used to prevent read compactions which
7676
// are too wide.
7777
func maxReadCompactionBytes(opts *Options, level int) uint64 {
78-
return uint64(10 * opts.Level(level).TargetFileSize)
78+
return uint64(10 * opts.Levels[level].TargetFileSize)
7979
}
8080

8181
// noCloseIter wraps around a FragmentIterator, intercepting and eliding
@@ -712,7 +712,7 @@ func newFlush(
712712
}
713713

714714
if opts.FlushSplitBytes > 0 {
715-
c.maxOutputFileSize = uint64(opts.Level(0).TargetFileSize)
715+
c.maxOutputFileSize = uint64(opts.Levels[0].TargetFileSize)
716716
c.maxOverlapBytes = maxGrandparentOverlapBytes(opts, 0)
717717
c.grandparents = c.version.Overlaps(baseLevel, c.userKeyBounds())
718718
adjustGrandparentOverlapBytesForFlush(c, flushingBytes)

compaction_picker.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -246,7 +246,7 @@ func newPickedCompaction(
246246
l0Organizer: l0Organizer,
247247
baseLevel: baseLevel,
248248
inputs: []compactionLevel{{level: startLevel}, {level: outputLevel}},
249-
maxOutputFileSize: uint64(opts.Level(adjustedLevel).TargetFileSize),
249+
maxOutputFileSize: uint64(opts.Levels[adjustedLevel].TargetFileSize),
250250
maxOverlapBytes: maxGrandparentOverlapBytes(opts, adjustedLevel),
251251
maxReadCompactionBytes: maxReadCompactionBytes(opts, adjustedLevel),
252252
}
@@ -1877,7 +1877,7 @@ func pickL0(
18771877
// counterproductive.
18781878
lcf = l0Organizer.PickIntraL0Compaction(env.earliestUnflushedSeqNum, minIntraL0Count, env.problemSpans)
18791879
if lcf != nil {
1880-
pc := newPickedCompactionFromL0(lcf, opts, vers, l0Organizer, 0, false)
1880+
pc := newPickedCompactionFromL0(lcf, opts, vers, l0Organizer, baseLevel, false)
18811881
if pc.setupInputs(opts, env.diskAvailBytes, pc.startLevel, env.problemSpans) {
18821882
if pc.startLevel.files.Empty() {
18831883
opts.Logger.Fatalf("empty compaction chosen")

compaction_test.go

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2212,8 +2212,7 @@ func TestCompactionErrorCleanup(t *testing.T) {
22122212
mem := vfs.NewMem()
22132213
ii := errorfs.OnIndex(math.MaxInt32) // start disabled
22142214
opts := &Options{
2215-
FS: errorfs.Wrap(mem, errorfs.ErrInjected.If(ii)),
2216-
Levels: make([]LevelOptions, numLevels),
2215+
FS: errorfs.Wrap(mem, errorfs.ErrInjected.If(ii)),
22172216
EventListener: &EventListener{
22182217
TableCreated: func(info TableCreateInfo) {
22192218
t.Log(info)
@@ -2918,8 +2917,7 @@ func TestCompactionErrorStats(t *testing.T) {
29182917
mem := vfs.NewMem()
29192918
injector := &WriteErrorInjector{}
29202919
opts := &Options{
2921-
FS: errorfs.Wrap(mem, injector),
2922-
Levels: make([]LevelOptions, numLevels),
2920+
FS: errorfs.Wrap(mem, injector),
29232921
EventListener: &EventListener{
29242922
TableCreated: func(info TableCreateInfo) {
29252923
t.Log(info)

data_test.go

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1767,8 +1767,8 @@ func parseDBOptionsArgs(opts *Options, args []datadriven.CmdArg) error {
17671767
}
17681768
opts.Experimental.SpanPolicyFunc = MakeStaticSpanPolicyFunc(opts.Comparer.Compare, span, policy)
17691769
case "target-file-sizes":
1770-
if len(opts.Levels) < len(cmdArg.Vals) {
1771-
opts.Levels = slices.Grow(opts.Levels, len(cmdArg.Vals)-len(opts.Levels))[0:len(cmdArg.Vals)]
1770+
if len(cmdArg.Vals) > len(opts.Levels) {
1771+
return errors.New("too many target-file-sizes")
17721772
}
17731773
for i := range cmdArg.Vals {
17741774
size, err := strconv.ParseInt(cmdArg.Vals[i], 10, 64)
@@ -1777,6 +1777,12 @@ func parseDBOptionsArgs(opts *Options, args []datadriven.CmdArg) error {
17771777
}
17781778
opts.Levels[i].TargetFileSize = size
17791779
}
1780+
// Set the remaining file sizes. Normally, EnsureDefaults() would do that
1781+
// for us but it was already called and the target file sizes for all
1782+
// levels are now set to the defaults.
1783+
for i := len(cmdArg.Vals); i < len(opts.Levels); i++ {
1784+
opts.Levels[i].TargetFileSize = opts.Levels[i-1].TargetFileSize * 2
1785+
}
17801786
case "value-separation":
17811787
if len(cmdArg.Vals) != 3 {
17821788
return errors.New("value-separation-policy expects 3 arguments: (enabled, minimum-size, max-blob-reference-depth)")

excise_test.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,8 @@ func TestExcise(t *testing.T) {
8080
Logger: testLogger{t},
8181
}
8282
if blockSize != 0 {
83-
opts.Levels = append(opts.Levels, LevelOptions{BlockSize: blockSize, IndexBlockSize: 32 << 10})
83+
opts.Levels[0].BlockSize = blockSize
84+
opts.Levels[0].IndexBlockSize = 32 << 10
8485
}
8586
opts.Experimental.RemoteStorage = remote.MakeSimpleFactory(map[remote.Locator]remote.Storage{
8687
"external-locator": remoteStorage,

external_test.go

Lines changed: 4 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -188,7 +188,6 @@ func buildSeparatedValuesDB(
188188
FS: vfs.NewMem(),
189189
KeySchema: cockroachkvs.KeySchema.Name,
190190
KeySchemas: sstable.MakeKeySchemas(&cockroachkvs.KeySchema),
191-
Levels: make([]pebble.LevelOptions, 7),
192191
MemTableSize: 2 << 20,
193192
L0CompactionThreshold: 2,
194193
}
@@ -199,17 +198,10 @@ func buildSeparatedValuesDB(
199198
MaxBlobReferenceDepth: 10,
200199
}
201200
}
202-
for i := 0; i < len(o.Levels); i++ {
203-
l := &o.Levels[i]
204-
l.BlockSize = 32 << 10 // 32 KB
205-
l.IndexBlockSize = 256 << 10 // 256 KB
206-
l.FilterPolicy = bloom.FilterPolicy(10)
207-
l.FilterType = pebble.TableFilter
208-
if i > 0 {
209-
l.TargetFileSize = o.Levels[i-1].TargetFileSize * 2
210-
}
211-
l.EnsureDefaults()
212-
}
201+
o.Levels[0].BlockSize = 32 << 10 // 32 KB
202+
o.Levels[0].IndexBlockSize = 256 << 10 // 256 KB
203+
o.Levels[0].FilterPolicy = bloom.FilterPolicy(10)
204+
o.Levels[0].FilterType = pebble.TableFilter
213205
db, err := pebble.Open("", o)
214206
require.NoError(tb, err)
215207

iterator_test.go

Lines changed: 5 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -969,8 +969,7 @@ func TestIteratorBlockIntervalFilter(t *testing.T) {
969969
FormatMajorVersion: internalFormatNewest,
970970
BlockPropertyCollectors: bpCollectors,
971971
}
972-
lo := LevelOptions{BlockSize: 1, IndexBlockSize: 1}
973-
opts.Levels = append(opts.Levels, lo)
972+
opts.Levels[0] = LevelOptions{BlockSize: 1, IndexBlockSize: 1}
974973

975974
// Automatic compactions may compact away tombstones from L6, making
976975
// some testcases non-deterministic.
@@ -1101,10 +1100,8 @@ func TestIteratorRandomizedBlockIntervalFilter(t *testing.T) {
11011100
opts.L0CompactionFileThreshold = 1 << rng.IntN(11) // 1-1024
11021101
opts.LBaseMaxBytes = 1 << rng.IntN(11) // 1B - 1KB
11031102
opts.MemTableSize = 2 << 10 // 2KB
1104-
var lopts LevelOptions
1105-
lopts.BlockSize = 1 << rng.IntN(8) // 1B - 256B
1106-
lopts.IndexBlockSize = 1 << rng.IntN(8) // 1B - 256B
1107-
opts.Levels = []LevelOptions{lopts}
1103+
opts.Levels[0].BlockSize = 1 << rng.IntN(8) // 1B - 256B
1104+
opts.Levels[0].IndexBlockSize = 1 << rng.IntN(8) // 1B - 256B
11081105

11091106
d, err := Open("", opts)
11101107
require.NoError(t, err)
@@ -2138,7 +2135,7 @@ func TestRangeKeyMaskingRandomized(t *testing.T) {
21382135
sstable.NewTestKeysBlockPropertyCollector,
21392136
},
21402137
}
2141-
opts1.Levels = baseOpts.levelOpts
2138+
copy(opts1.Levels[:], baseOpts.levelOpts)
21422139
d1, err := Open("", opts1)
21432140
require.NoError(t, err)
21442141

@@ -2151,7 +2148,7 @@ func TestRangeKeyMaskingRandomized(t *testing.T) {
21512148
sstable.NewTestKeysBlockPropertyCollector,
21522149
},
21532150
}
2154-
opts2.Levels = randomOpts.levelOpts
2151+
copy(opts2.Levels[:], randomOpts.levelOpts)
21552152
d2, err := Open("", opts2)
21562153
require.NoError(t, err)
21572154

metamorphic/options.go

Lines changed: 7 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -326,16 +326,14 @@ func defaultOptions(kf KeyFormat) *pebble.Options {
326326
// Use an archive cleaner to ease post-mortem debugging.
327327
Cleaner: base.ArchiveCleaner{},
328328
// Always use our custom comparer which provides a Split method.
329-
Comparer: kf.Comparer,
330-
KeySchema: kf.KeySchema.Name,
331-
KeySchemas: sstable.MakeKeySchemas(kf.KeySchema),
332-
FS: vfs.NewMem(),
333-
FormatMajorVersion: defaultFormatMajorVersion,
334-
Levels: []pebble.LevelOptions{{
335-
FilterPolicy: bloom.FilterPolicy(10),
336-
}},
329+
Comparer: kf.Comparer,
330+
KeySchema: kf.KeySchema.Name,
331+
KeySchemas: sstable.MakeKeySchemas(kf.KeySchema),
332+
FS: vfs.NewMem(),
333+
FormatMajorVersion: defaultFormatMajorVersion,
337334
BlockPropertyCollectors: kf.BlockPropertyCollectors,
338335
}
336+
opts.Levels[0].FilterPolicy = bloom.FilterPolicy(10)
339337
opts.Experimental.IngestSplit = func() bool { return false }
340338
opts.Experimental.EnableColumnarBlocks = func() bool { return true }
341339

@@ -808,7 +806,7 @@ func RandomOptions(
808806
default:
809807
lopts.Compression = func() block.Compression { return pebble.SnappyCompression }
810808
}
811-
opts.Levels = []pebble.LevelOptions{lopts}
809+
opts.Levels[0] = lopts
812810

813811
// Explicitly disable disk-backed FS's for the random configurations. The
814812
// single standard test configuration that uses a disk-backed FS is

0 commit comments

Comments
 (0)