@@ -168,9 +168,8 @@ type DefaultPlanner struct {
 }
 
 type fileStore interface {
-	Stats() []FileStat
+	Stats() []ExtFileStat
 	LastModified() time.Time
-	BlockCount(path string, idx int) int
 	ParseFileName(path string) (int, int, error)
 	NextGeneration() int
 	TSMReader(path string) (*TSMReader, error)
@@ -190,7 +189,7 @@ func NewDefaultPlanner(fs fileStore, writeColdDuration time.Duration) *DefaultPl
 // 000001 each with different sequence numbers.
 type tsmGeneration struct {
 	id            int
-	files         []FileStat
+	files         []ExtFileStat
 	parseFileName ParseFileNameFunc
 }
 
@@ -265,7 +264,10 @@ func (c *DefaultPlanner) generationsFullyCompacted(gens tsmGenerations) (bool, s
 	aggressivePointsPerBlockCount := 0
 	filesUnderMaxTsmSizeCount := 0
 	for _, tsmFile := range gens[0].files {
-		if c.FileStore.BlockCount(tsmFile.Path, 1) >= c.GetAggressiveCompactionPointsPerBlock() {
+		// We check for greater than the default points per block here because the admin may
+		// have increased aggressive points per block in the config and wants to
+		// recompact files at the new higher max.
+		if tsmFile.FirstBlockCount > tsdb.DefaultMaxPointsPerBlock {
 			aggressivePointsPerBlockCount++
 		}
 		if tsmFile.Size < tsdb.MaxTSMFileSize {
@@ -505,7 +507,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) {
 		var skip bool
 
 		// Skip the file if it's over the max size and contains a full block and it does not have any tombstones
-		if len(generations) > 2 && group.size() > uint64(tsdb.MaxTSMFileSize) && c.FileStore.BlockCount(group.files[0].Path, 1) >= tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {
+		if len(generations) > 2 && group.size() > uint64(tsdb.MaxTSMFileSize) && group.files[0].FirstBlockCount >= tsdb.DefaultMaxPointsPerBlock && !group.hasTombstones() {
 			skip = true
 		}
 
@@ -581,7 +583,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) {
 		// Skip the file if it's over the max size and contains a full block or the generation is split
 		// over multiple files. In the latter case, that would mean the data in the file spilled over
 		// the 2GB limit.
-		if g.size() > uint64(tsdb.MaxTSMFileSize) && c.FileStore.BlockCount(g.files[0].Path, 1) >= tsdb.DefaultMaxPointsPerBlock {
+		if g.size() > uint64(tsdb.MaxTSMFileSize) && g.files[0].FirstBlockCount >= tsdb.DefaultMaxPointsPerBlock {
 			start = i + 1
 		}
 
@@ -625,7 +627,7 @@ func (c *DefaultPlanner) Plan(lastWrite time.Time) ([]CompactionGroup, int64) {
 		}
 
 		// Skip the file if it's over the max size and it contains a full block
-		if gen.size() >= uint64(tsdb.MaxTSMFileSize) && c.FileStore.BlockCount(gen.files[0].Path, 1) >= tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {
+		if gen.size() >= uint64(tsdb.MaxTSMFileSize) && gen.files[0].FirstBlockCount >= tsdb.DefaultMaxPointsPerBlock && !gen.hasTombstones() {
 			startIndex++
 			continue
 		}