Skip to content

Commit a7c0954

Browse files
committed
sstable: remove VirtualReader and CommonReader
Refactor VirtualReaderParams into block.ReadEnv to get rid of VirtualReader and CommonReader.
1 parent 0c0e65f commit a7c0954

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

53 files changed

+698
-733
lines changed

checkpoint.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -283,7 +283,7 @@ func (d *DB) Checkpoint(
283283
}
284284

285285
fileBacking := f.FileBacking
286-
if f.Virtual {
286+
if f.Virtual != nil {
287287
if _, ok := requiredVirtualBackingFiles[fileBacking.DiskFileNum]; ok {
288288
continue
289289
}

cockroachkvs/cockroachkvs_bench_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -132,7 +132,7 @@ func benchmarkRandSeekInSST(
132132
rp := sstable.MakeTrivialReaderProvider(reader)
133133
iter, err := reader.NewPointIter(ctx, sstable.IterOptions{
134134
FilterBlockSizeLimit: sstable.NeverUseFilterBlock,
135-
Env: block.ReadEnv{Stats: &stats, IterStats: nil},
135+
Env: sstable.ReadEnv{Block: block.ReadEnv{Stats: &stats, IterStats: nil}},
136136
ReaderProvider: rp,
137137
})
138138
require.NoError(b, err)
@@ -150,7 +150,7 @@ func benchmarkRandSeekInSST(
150150
key := queryKeys[i%numQueryKeys]
151151
iter, err := reader.NewPointIter(ctx, sstable.IterOptions{
152152
FilterBlockSizeLimit: sstable.NeverUseFilterBlock,
153-
Env: block.ReadEnv{Stats: &stats, IterStats: nil},
153+
Env: sstable.ReadEnv{Block: block.ReadEnv{Stats: &stats, IterStats: nil}},
154154
ReaderProvider: rp,
155155
})
156156
if err != nil {

compaction.go

Lines changed: 17 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -424,7 +424,7 @@ func newCompaction(
424424
if mustCopy {
425425
// If the source is virtual, it's best to just rewrite the file as all
426426
// conditions in the above comment are met.
427-
if !meta.Virtual {
427+
if meta.Virtual == nil {
428428
c.kind = compactionKindCopy
429429
}
430430
} else {
@@ -966,7 +966,7 @@ func (c *compaction) newRangeDelIter(
966966
iterSet, err := newIters(context.Background(), f.TableMetadata, &opts,
967967
internalIterOpts{
968968
compaction: true,
969-
readEnv: block.ReadEnv{BufferPool: &c.bufferPool},
969+
readEnv: sstable.ReadEnv{Block: block.ReadEnv{BufferPool: &c.bufferPool}},
970970
}, iterRangeDeletions)
971971
if err != nil {
972972
return nil, err
@@ -1310,8 +1310,8 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
13101310

13111311
// This file fits perfectly within the excise span, so we can slot it at L6.
13121312
if ingestFlushable.exciseSpan.Valid() &&
1313-
ingestFlushable.exciseSpan.Contains(d.cmp, file.TableMetadata.Smallest) &&
1314-
ingestFlushable.exciseSpan.Contains(d.cmp, file.TableMetadata.Largest) {
1313+
ingestFlushable.exciseSpan.Contains(d.cmp, file.Smallest) &&
1314+
ingestFlushable.exciseSpan.Contains(d.cmp, file.Largest) {
13151315
level = 6
13161316
} else {
13171317
// TODO(radu): this can perform I/O; we should not do this while holding DB.mu.
@@ -1320,18 +1320,18 @@ func (d *DB) runIngestFlush(c *compaction) (*manifest.VersionEdit, error) {
13201320
return nil, err
13211321
}
13221322
level, fileToSplit, err = ingestTargetLevel(
1323-
ctx, d.cmp, lsmOverlap, baseLevel, d.mu.compact.inProgress, file.TableMetadata, suggestSplit,
1323+
ctx, d.cmp, lsmOverlap, baseLevel, d.mu.compact.inProgress, file, suggestSplit,
13241324
)
13251325
if err != nil {
13261326
return nil, err
13271327
}
13281328
}
13291329

13301330
// Add the current flushableIngest file to the version.
1331-
ve.NewTables = append(ve.NewTables, newTableEntry{Level: level, Meta: file.TableMetadata})
1331+
ve.NewTables = append(ve.NewTables, newTableEntry{Level: level, Meta: file})
13321332
if fileToSplit != nil {
13331333
ingestSplitFiles = append(ingestSplitFiles, ingestSplitFile{
1334-
ingestFile: file.TableMetadata,
1334+
ingestFile: file,
13351335
splitFile: fileToSplit,
13361336
level: level,
13371337
})
@@ -2401,7 +2401,7 @@ func (d *DB) cleanupVersionEdit(ve *versionEdit) {
24012401
deletedFiles[key.FileNum] = struct{}{}
24022402
}
24032403
for i := range ve.NewTables {
2404-
if ve.NewTables[i].Meta.Virtual {
2404+
if ve.NewTables[i].Meta.Virtual != nil {
24052405
// We handle backing files separately.
24062406
continue
24072407
}
@@ -2561,8 +2561,8 @@ func (d *DB) runCopyCompaction(
25612561
panic("pebble: scheduled a copy compaction that is not actually moving files to shared storage")
25622562
}
25632563
// Note that based on logic in the compaction picker, we're guaranteed
2564-
// inputMeta.Virtual is false.
2565-
if inputMeta.Virtual {
2564+
// inputMeta.Virtual is nil.
2565+
if inputMeta.Virtual != nil {
25662566
panic(errors.AssertionFailedf("cannot do a copy compaction of a virtual sstable across local/remote storage"))
25672567
}
25682568
}
@@ -2660,12 +2660,12 @@ func (d *DB) runCopyCompaction(
26602660

26612661
// NB: external files are always virtual.
26622662
var wrote uint64
2663-
err = d.fileCache.withVirtualReader(ctx, block.NoReadEnv, inputMeta.VirtualMeta(), func(r sstable.VirtualReader, _ block.ReadEnv) error {
2663+
err = d.fileCache.withReader(ctx, block.NoReadEnv, inputMeta.VirtualMeta(), func(r *sstable.Reader, env sstable.ReadEnv) error {
26642664
var err error
26652665
// TODO(radu): plumb a ReadEnv to CopySpan (it could use the buffer pool
26662666
// or update category stats).
26672667
wrote, err = sstable.CopySpan(ctx,
2668-
src, r.UnsafeReader(), d.opts.MakeReaderOptions(),
2668+
src, r, d.opts.MakeReaderOptions(),
26692669
w, d.opts.MakeWriterOptions(c.outputLevel.level, d.TableFormat()),
26702670
start, end,
26712671
)
@@ -2700,7 +2700,7 @@ func (d *DB) runCopyCompaction(
27002700
Level: c.outputLevel.level,
27012701
Meta: newMeta,
27022702
}}
2703-
if newMeta.Virtual {
2703+
if newMeta.Virtual != nil {
27042704
ve.CreatedBackingTables = []*fileBacking{newMeta.FileBacking}
27052705
}
27062706
c.metrics[c.outputLevel.level] = &LevelMetrics{
@@ -2904,7 +2904,7 @@ func (d *DB) runDeleteOnlyCompaction(
29042904
// NewFiles.
29052905
usedBackingFiles := make(map[base.DiskFileNum]struct{})
29062906
for _, e := range ve.NewTables {
2907-
if e.Meta.Virtual {
2907+
if e.Meta.Virtual != nil {
29082908
usedBackingFiles[e.Meta.FileBacking.DiskFileNum] = struct{}{}
29092909
}
29102910
}
@@ -3078,18 +3078,18 @@ func (d *DB) compactAndWrite(
30783078
// translate to 3 MiB per compaction.
30793079
c.bufferPool.Init(12)
30803080
defer c.bufferPool.Release()
3081-
env := block.ReadEnv{
3081+
blockReadEnv := block.ReadEnv{
30823082
BufferPool: &c.bufferPool,
30833083
Stats: &c.stats,
30843084
IterStats: d.fileCache.SSTStatsCollector().Accumulator(
30853085
uint64(uintptr(unsafe.Pointer(c))),
30863086
categoryCompaction,
30873087
),
30883088
}
3089-
c.valueFetcher.Init(d.fileCache, env)
3089+
c.valueFetcher.Init(d.fileCache, blockReadEnv)
30903090
iiopts := internalIterOpts{
30913091
compaction: true,
3092-
readEnv: env,
3092+
readEnv: sstable.ReadEnv{Block: blockReadEnv},
30933093
blobValueFetcher: &c.valueFetcher,
30943094
}
30953095

compaction_picker.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1186,7 +1186,7 @@ func pickCompactionSeedFile(
11861186
func responsibleForGarbageBytes(
11871187
virtualBackings *manifest.VirtualBackings, m *tableMetadata,
11881188
) uint64 {
1189-
if !m.Virtual {
1189+
if m.Virtual == nil {
11901190
return 0
11911191
}
11921192
useCount, virtualizedSize := virtualBackings.Usage(m.FileBacking.DiskFileNum)

compaction_test.go

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -600,7 +600,7 @@ func TestCompaction(t *testing.T) {
600600
defer provider.Close()
601601
for _, levelMetadata := range v.Levels {
602602
for meta := range levelMetadata.All() {
603-
if meta.Virtual {
603+
if meta.Virtual != nil {
604604
continue
605605
}
606606
f, err := provider.OpenForReading(context.Background(), base.FileTypeTable, meta.FileBacking.DiskFileNum, objstorage.OpenOptions{})
@@ -3001,7 +3001,7 @@ func hasExternalFiles(d *DB) bool {
30013001
for level := 0; level < manifest.NumLevels; level++ {
30023002
iter := v.Levels[level].Iter()
30033003
for m := iter.First(); m != nil; m = iter.Next() {
3004-
if m.Virtual {
3004+
if m.Virtual != nil {
30053005
meta, err := d.objProvider.Lookup(base.FileTypeTable, m.FileBacking.DiskFileNum)
30063006
if err != nil {
30073007
panic(err)

data_test.go

Lines changed: 6 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1286,11 +1286,13 @@ func runSSTablePropertiesCmd(t *testing.T, td *datadriven.TestData, d *DB) strin
12861286
}
12871287
defer r.Close()
12881288

1289-
var v sstable.VirtualReader
12901289
props := r.Properties.String()
1291-
if m != nil && m.Virtual {
1292-
v = sstable.MakeVirtualReader(r, m.VirtualMeta().VirtualReaderParams(false /* isShared */))
1293-
props = v.Properties.String()
1290+
env := sstable.ReadEnv{}
1291+
if m != nil && m.Virtual != nil {
1292+
m.InitVirtual(false /* isShared */)
1293+
env.Virtual = m.Virtual
1294+
scaledProps := r.Properties.GetScaledProperties(env.Virtual.BackingSize, env.Virtual.Size)
1295+
props = scaledProps.String()
12941296
}
12951297
if len(td.Input) == 0 {
12961298
return props

db.go

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1353,7 +1353,7 @@ type internalIterOpts struct {
13531353
// NewCompactionIter; these iterators have a more constrained interface
13541354
// and are optimized for the sequential scan of a compaction.
13551355
compaction bool
1356-
readEnv block.ReadEnv
1356+
readEnv sstable.ReadEnv
13571357
boundLimitedFilter sstable.BoundLimitedBlockPropertyFilter
13581358
// blobValueFetcher is the base.ValueFetcher to use when constructing
13591359
// internal values to represent values stored externally in blob files.
@@ -1423,7 +1423,7 @@ func (i *Iterator) constructPointIter(
14231423
}
14241424
i.blobValueFetcher.Init(i.fc, readEnv)
14251425
internalOpts := internalIterOpts{
1426-
readEnv: readEnv,
1426+
readEnv: sstable.ReadEnv{Block: readEnv},
14271427
blobValueFetcher: &i.blobValueFetcher,
14281428
}
14291429
if i.opts.RangeKeyMasking.Filter != nil {
@@ -2248,7 +2248,7 @@ func (d *DB) SSTables(opts ...SSTablesOption) ([][]SSTableInfo, error) {
22482248
}
22492249
destTables[j].Properties = p
22502250
}
2251-
destTables[j].Virtual = m.Virtual
2251+
destTables[j].Virtual = m.Virtual != nil
22522252
destTables[j].BackingSSTNum = m.FileBacking.DiskFileNum
22532253
objMeta, err := d.objProvider.Lookup(base.FileTypeTable, m.FileBacking.DiskFileNum)
22542254
if err != nil {

download.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -403,7 +403,7 @@ func firstExternalFileInLevelIter(
403403
f = it.Next()
404404
}
405405
for ; f != nil && endBound.IsUpperBoundFor(cmp, f.Smallest.UserKey); f = it.Next() {
406-
if f.Virtual && objstorage.IsExternalTable(objProvider, f.FileBacking.DiskFileNum) {
406+
if f.Virtual != nil && objstorage.IsExternalTable(objProvider, f.FileBacking.DiskFileNum) {
407407
return f
408408
}
409409
}

download_test.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -181,7 +181,7 @@ func TestDownloadTask(t *testing.T) {
181181
ch <- ErrCancelledCompaction
182182
} else {
183183
fmt.Fprintf(&buf, "downloading %s\n", f.FileNum)
184-
f.Virtual = false
184+
f.Virtual = nil
185185
f.FileBacking.DiskFileNum = base.DiskFileNum(f.FileNum)
186186
ch <- nil
187187
}

excise.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ import (
1212
"github.com/cockroachdb/pebble/internal/base"
1313
"github.com/cockroachdb/pebble/internal/invariants"
1414
"github.com/cockroachdb/pebble/internal/manifest"
15+
"github.com/cockroachdb/pebble/sstable/virtual"
1516
)
1617

1718
// Excise atomically deletes all data overlapping with the provided span. All
@@ -102,7 +103,7 @@ func (d *DB) exciseTable(
102103
// https://github.com/cockroachdb/pebble/issues/2112 .
103104
if d.cmp(m.Smallest.UserKey, exciseSpan.Start) < 0 {
104105
leftTable = &tableMetadata{
105-
Virtual: true,
106+
Virtual: &virtual.VirtualReaderParams{},
106107
FileBacking: m.FileBacking,
107108
FileNum: d.mu.versions.getNextFileNum(),
108109
// Note that these are loose bounds for smallest/largest seqnums, but they're
@@ -135,7 +136,7 @@ func (d *DB) exciseTable(
135136
// See comment before the definition of leftFile for the motivation behind
136137
// calculating tight user-key bounds.
137138
rightTable = &tableMetadata{
138-
Virtual: true,
139+
Virtual: &virtual.VirtualReaderParams{},
139140
FileBacking: m.FileBacking,
140141
FileNum: d.mu.versions.getNextFileNum(),
141142
// Note that these are loose bounds for smallest/largest seqnums, but they're
@@ -351,7 +352,7 @@ func applyExciseToVersionEdit(
351352
if leftTable == nil && rightTable == nil {
352353
return
353354
}
354-
if !originalTable.Virtual {
355+
if originalTable.Virtual == nil {
355356
// If the original table was virtual, then its file backing is already known
356357
// to the manifest; we don't need to create another file backing. Note that
357358
// there must be only one CreatedBackingTables entry per backing sstable.

0 commit comments

Comments
 (0)