@@ -118,6 +118,7 @@ type Metrics struct {
 		// effective heuristics are at ingesting files into lower levels, saving
 		// write amplification.
 		BytesWeightedByLevel uint64
+		ExciseIngestCount    int64
 	}
 	// PaceDuration is the time waiting for the pacer to allow the workload to
 	// continue.
@@ -204,6 +205,9 @@ func (m *Metrics) WriteBenchmarkString(name string, w io.Writer) error {
 		{label: "EstimatedDebt/max", values: []benchfmt.Value{
 			{Value: float64(m.EstimatedDebt.Max()), Unit: "bytes"},
 		}},
+		{label: "ExciseDuringIngestion", values: []benchfmt.Value{
+			{Value: float64(m.Ingest.ExciseIngestCount), Unit: "excise"},
+		}},
 		{label: "FlushUtilization", values: []benchfmt.Value{
 			{Value: m.Final.Flush.WriteThroughput.Utilization(), Unit: "util"},
 		}},
@@ -563,6 +567,7 @@ func (r *Runner) Wait() (Metrics, error) {
 	m.CompactionCounts.Rewrite = pm.Compact.RewriteCount
 	m.CompactionCounts.Copy = pm.Compact.CopyCount
 	m.CompactionCounts.MultiLevel = pm.Compact.MultiLevelCount
+	m.Ingest.ExciseIngestCount = pm.Ingest.ExciseIngestCount
 	m.Ingest.BytesIntoL0 = pm.Levels[0].TableBytesIngested
 	m.Ingest.BytesWeightedByLevel = ingestBytesWeighted
 	return m, err
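The runner copies this counter straight out of Pebble's own DB metrics (pm above). As a minimal sketch, assuming a Pebble build whose Metrics.Ingest struct exposes the ExciseIngestCount field read in Runner.Wait, the same counter can be inspected directly on any open database:

package main

import (
	"fmt"

	"github.com/cockroachdb/pebble"
	"github.com/cockroachdb/pebble/vfs"
)

func main() {
	// Open a throwaway in-memory store purely for illustration.
	db, err := pebble.Open("demo", &pebble.Options{FS: vfs.NewMem()})
	if err != nil {
		panic(err)
	}
	defer db.Close()

	// Metrics returns a point-in-time snapshot; Ingest.ExciseIngestCount is
	// the field the runner copies into its own Metrics.Ingest above.
	m := db.Metrics()
	fmt.Printf("ingests that performed an excise: %d\n", m.Ingest.ExciseIngestCount)
}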
@@ -584,8 +589,10 @@ type workloadStep struct {
 	// readAmp estimation for the LSM *before* ve was applied.
 	previousReadAmp int
 	// non-nil for flushStepKind
-	flushBatch           *pebble.Batch
-	tablesToIngest       []string
+	flushBatch     *pebble.Batch
+	tablesToIngest []string
+	// exciseSpan is set for ingestAndExciseStepKind
+	exciseSpan           pebble.KeyRange
 	cumulativeWriteBytes uint64
 }
 
@@ -595,6 +602,7 @@ const (
 	flushStepKind stepKind = iota
 	ingestStepKind
 	compactionStepKind
+	ingestAndExciseStepKind
 )
 
 // eventListener returns a Pebble EventListener that is installed on the replay
@@ -692,6 +700,12 @@ func (r *Runner) applyWorkloadSteps(ctx context.Context) error {
 			}
 			r.metrics.writeBytes.Store(step.cumulativeWriteBytes)
 			r.stepsApplied <- step
+		case ingestAndExciseStepKind:
+			if _, err := r.d.IngestAndExcise(context.Background(), step.tablesToIngest, nil /* shared */, nil /* external */, step.exciseSpan); err != nil {
+				return err
+			}
+			r.metrics.writeBytes.Store(step.cumulativeWriteBytes)
+			r.stepsApplied <- step
 		case compactionStepKind:
 			// No-op.
 			// TODO(jackson): Should we elide this earlier?
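For reference, the new case maps onto Pebble's public IngestAndExcise method in the same five-argument form shown above, with nil shared and external sstable slices. A minimal sketch follows; the helper name is hypothetical and exists only to isolate the call:

package replayexample

import (
	"context"

	"github.com/cockroachdb/pebble"
)

// ingestAndExciseStep mirrors the ingestAndExciseStepKind branch: it ingests
// the staged sstables and atomically excises any pre-existing data that
// overlaps span.
func ingestAndExciseStep(db *pebble.DB, paths []string, span pebble.KeyRange) error {
	// The replay runner passes nil for shared and external sstables; only
	// local files and the excise span are needed here.
	_, err := db.IngestAndExcise(context.Background(), paths, nil /* shared */, nil /* external */, span)
	return err
}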
@@ -795,12 +809,22 @@ func (r *Runner) prepareWorkloadSteps(ctx context.Context) error {
 			// flush.
 			s.kind = ingestStepKind
 		}
+		if len(ve.ExciseBoundsRecord) > 0 {
+			// If a version edit contains excise bounds records, it's an excise operation.
+			// In practice, there should typically be only one excise bounds record per version edit.
+			exciseEntry := ve.ExciseBoundsRecord[0]
+			s.exciseSpan = pebble.KeyRange{
+				Start: exciseEntry.Bounds.Start,
+				End:   exciseEntry.Bounds.End.Key,
+			}
+			s.kind = ingestAndExciseStepKind
+		}
 		var newFiles []base.DiskFileNum
 		blobRefMap := make(map[base.DiskFileNum]manifest.BlobReferences)
 		blobFileMap := make(map[base.BlobFileID]base.DiskFileNum)
 		for _, nf := range ve.NewTables {
 			newFiles = append(newFiles, nf.Meta.TableBacking.DiskFileNum)
-			if s.kind == ingestStepKind && (nf.Meta.SmallestSeqNum != nf.Meta.LargestSeqNum || nf.Level != 0) {
+			if s.kind == ingestStepKind && (nf.Meta.SmallestSeqNum != nf.Meta.LargestSeqNum) {
 				s.kind = flushStepKind
 			}
 			if nf.Meta.BlobReferenceDepth > 0 {
@@ -870,7 +894,7 @@ func (r *Runner) prepareWorkloadSteps(ctx context.Context) error {
 				return errors.Wrapf(err, "flush in %q at offset %d", manifestName, rr.Offset())
 			}
 			cumulativeWriteBytes += uint64(s.flushBatch.Len())
-		case ingestStepKind:
+		case ingestStepKind, ingestAndExciseStepKind:
 			// Copy the ingested sstables into a staging area within the
 			// run dir. This is necessary for two reasons:
 			// a) Ingest will remove the source file, and we don't want