@@ -3312,6 +3312,7 @@ func (d *DB) compactAndWrite(
3312
3312
tableFormat sstable.TableFormat ,
3313
3313
valueSeparation compact.ValueSeparation ,
3314
3314
) (result compact.Result ) {
3315
+ suggestedCacheReaders := blob .SuggestedCachedReaders (len (c .inputs ))
3315
3316
// Compactions use a pool of buffers to read blocks, avoiding polluting the
3316
3317
// block cache with blocks that will not be read again. We initialize the
3317
3318
// buffer pool with a size of at least 18. This initial size does not need to be
@@ -3320,22 +3321,22 @@ func (d *DB) compactAndWrite(
3320
3321
// choosing a size larger than that working set avoids any additional
3321
3322
// allocations to grow the size of the pool over the course of iteration.
3322
3323
//
3323
- // Justification for initial size 12 : In a two-level compaction, at any
3324
- // given moment we'll have 2 index blocks in-use and 2 data blocks in-use.
3324
+ // Justification for initial size 18 : In a compaction with up to 3 levels,
3325
+ // at any given moment we'll have 3 index blocks in-use and 3 data blocks in-use.
3325
3326
// Additionally, when decoding a compressed block, we'll temporarily
3326
3327
// allocate 1 additional block to hold the compressed buffer. In the worst
3327
- // case that all input sstables have two-level index blocks (+2 ), value
3328
- // blocks (+2 ), range deletion blocks (+n) and range key blocks (+n), we'll
3329
- // additionally require 2n+4 blocks where n is the number of input sstables.
3328
+ // case that all input sstables have two-level index blocks (+3 ), value
3329
+ // blocks (+3 ), range deletion blocks (+n) and range key blocks (+n), we'll
3330
+ // additionally require 2n+6 blocks where n is the number of input sstables.
3330
3331
// Range deletion and range key blocks are relatively rare, and the cost of
3331
3332
// an additional allocation or two over the course of the compaction is
3332
3333
// considered to be okay. A larger initial size would cause the pool to hold
3333
3334
// on to more memory, even when it's not in-use because the pool will
3334
3335
// recycle buffers up to the current capacity of the pool. The memory use of
3335
- // a 12 -buffer pool is expected to be within reason, even if all the buffers
3336
+ // an 18-buffer pool is expected to be within reason, even if all the buffers
3336
3337
// grow to the typical size of an index block (256 KiB) which would
3337
- // translate to 3 MiB per compaction.
3338
- c .iterationState .bufferPool .Init (12 )
3338
+ // translate to 4.5 MiB per compaction. On top of the base 18, we reserve 2
+ // additional buffers per suggested cached blob reader, since each cached
+ // reader can pin blocks of its own while fetching separated values.
3339
+ c .iterationState .bufferPool .Init (18 + suggestedCacheReaders * 2 )
3339
3340
defer c .iterationState .bufferPool .Release ()
3340
3341
blockReadEnv := block.ReadEnv {
3341
3342
BufferPool : & c .iterationState .bufferPool ,
@@ -3346,8 +3347,7 @@ func (d *DB) compactAndWrite(
3346
3347
),
3347
3348
}
3348
3349
if c .version != nil {
3349
- c .iterationState .valueFetcher .Init (& c .version .BlobFiles , d .fileCache , blockReadEnv ,
3350
- blob .SuggestedCachedReaders (len (c .inputs )))
3350
+ c .iterationState .valueFetcher .Init (& c .version .BlobFiles , d .fileCache , blockReadEnv , suggestedCacheReaders )
3351
3351
}
3352
3352
iiopts := internalIterOpts {
3353
3353
compaction : true ,
0 commit comments