Set minimum buffer size of BytesPool to maxChunk size (#1581)
Signed-off-by: Jens Hausherr <jens.hausherr@xing.com>
Signed-off-by: Giedrius Statkevičius <giedriuswork@gmail.com>
jabbrwcky authored and GiedriusS committed Oct 28, 2019
1 parent cf9c9f2 commit e6357cf
Showing 1 changed file with 3 additions and 3 deletions.
6 changes: 3 additions & 3 deletions pkg/store/bucket.go
@@ -52,6 +52,8 @@ import (
 // Take a look at Figure 6 in this whitepaper http://www.vldb.org/pvldb/vol8/p1816-teller.pdf.
 const maxSamplesPerChunk = 120
 
+const maxChunkSize = 16000
+
 type bucketStoreMetrics struct {
 	blocksLoaded prometheus.Gauge
 	blockLoads   prometheus.Counter
@@ -241,7 +243,7 @@ func NewBucketStore(
 		return nil, errors.Errorf("max concurrency value cannot be lower than 0 (got %v)", maxConcurrent)
 	}
 
-	chunkPool, err := pool.NewBytesPool(2e5, 50e6, 2, maxChunkPoolBytes)
+	chunkPool, err := pool.NewBytesPool(maxChunkSize, 50e6, 2, maxChunkPoolBytes)
 	if err != nil {
 		return nil, errors.Wrap(err, "create chunk pool")
 	}
@@ -1768,8 +1770,6 @@ func (r *bucketChunkReader) addPreload(id uint64) error {
 
 // preload all added chunk IDs. Must be called before the first call to Chunk is made.
 func (r *bucketChunkReader) preload(samplesLimiter *Limiter) error {
-	const maxChunkSize = 16000
-
 	var g run.Group
 
 	numChunks := uint64(0)
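
For context, here is a minimal, self-contained Go sketch of how a bucketed bytes pool in the spirit of pool.NewBytesPool(minBucketSize, maxBucketSize, growthFactor, maxTotal) can size its buckets. The newBytesPool and get helpers below, and their bucket-growth logic, are illustrative assumptions rather than the actual pkg/pool implementation; the sketch only shows why making the smallest bucket equal to maxChunkSize means a single chunk always fits in the first bucket.

package main

import (
	"fmt"
	"sync"
)

// maxChunkSize mirrors the constant added in the diff above.
const maxChunkSize = 16000

// bytesPool is a hypothetical bucketed []byte pool, sketched here only to
// illustrate min/max/factor sizing; it is not the Thanos pkg/pool code.
type bytesPool struct {
	sizes   []int
	buckets []*sync.Pool
}

// newBytesPool creates one bucket per size, starting at minSize and growing
// by factor until maxSize is exceeded.
func newBytesPool(minSize, maxSize int, factor float64) *bytesPool {
	p := &bytesPool{}
	for s := minSize; s <= maxSize; s = int(float64(s) * factor) {
		size := s
		p.sizes = append(p.sizes, size)
		p.buckets = append(p.buckets, &sync.Pool{
			New: func() interface{} { return make([]byte, 0, size) },
		})
	}
	return p
}

// get returns a zero-length buffer from the smallest bucket whose capacity is
// at least sz; oversized requests fall back to a plain allocation.
func (p *bytesPool) get(sz int) []byte {
	for i, bucketSize := range p.sizes {
		if sz <= bucketSize {
			return p.buckets[i].Get().([]byte)[:0]
		}
	}
	return make([]byte, 0, sz)
}

func main() {
	// As in the diff: the minimum bucket is maxChunkSize (16 kB) instead of the
	// previous hard-coded 2e5 (200 kB), so the smallest pooled buffer is just
	// large enough for one maximum-size chunk.
	p := newBytesPool(maxChunkSize, 50_000_000, 2)

	buf := p.get(12_000) // any single-chunk request lands in the first bucket
	fmt.Printf("buckets: %d, returned capacity: %d\n", len(p.sizes), cap(buf))
}

If the real pool also enforces a total-bytes cap (the maxChunkPoolBytes argument in the call above), that accounting is omitted here for brevity.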
