compactor.go

package compactor

import (
	"context"
	"flag"
	"fmt"
	"hash/fnv"
	"path"
	"strings"
	"time"

	"github.com/go-kit/kit/log"
	"github.com/go-kit/kit/log/level"
	"github.com/pkg/errors"
	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
	"github.com/prometheus/prometheus/tsdb"
	tsdb_errors "github.com/prometheus/prometheus/tsdb/errors"
	"github.com/thanos-io/thanos/pkg/block"
	"github.com/thanos-io/thanos/pkg/compact"
	"github.com/thanos-io/thanos/pkg/compact/downsample"
	"github.com/thanos-io/thanos/pkg/objstore"

	"github.com/cortexproject/cortex/pkg/ring"
	cortex_tsdb "github.com/cortexproject/cortex/pkg/storage/tsdb"
	"github.com/cortexproject/cortex/pkg/util"
	"github.com/cortexproject/cortex/pkg/util/services"
)

// Config holds the Compactor config.
type Config struct {
	BlockRanges           cortex_tsdb.DurationList `yaml:"block_ranges"`
	BlockSyncConcurrency  int                      `yaml:"block_sync_concurrency"`
	MetaSyncConcurrency   int                      `yaml:"meta_sync_concurrency"`
	ConsistencyDelay      time.Duration            `yaml:"consistency_delay"`
	DataDir               string                   `yaml:"data_dir"`
	CompactionInterval    time.Duration            `yaml:"compaction_interval"`
	CompactionRetries     int                      `yaml:"compaction_retries"`
	CompactionConcurrency int                      `yaml:"compaction_concurrency"`
	DeletionDelay         time.Duration            `yaml:"deletion_delay"`

	// Compactors sharding.
	ShardingEnabled bool       `yaml:"sharding_enabled"`
	ShardingRing    RingConfig `yaml:"sharding_ring"`

	// No need to add options to customize the retry backoff, given the defaults
	// should be fine, but allow overriding them in tests.
	retryMinBackoff time.Duration `yaml:"-"`
	retryMaxBackoff time.Duration `yaml:"-"`
}

// RegisterFlags registers the Compactor flags.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
	cfg.ShardingRing.RegisterFlags(f)

	cfg.BlockRanges = cortex_tsdb.DurationList{2 * time.Hour, 12 * time.Hour, 24 * time.Hour}
	cfg.retryMinBackoff = 10 * time.Second
	cfg.retryMaxBackoff = time.Minute

	f.Var(&cfg.BlockRanges, "compactor.block-ranges", "List of compaction time ranges.")
	f.DurationVar(&cfg.ConsistencyDelay, "compactor.consistency-delay", 0, fmt.Sprintf("Minimum age of fresh (non-compacted) blocks before they are processed. Malformed blocks older than the maximum of consistency-delay and %s will be removed.", compact.PartialUploadThresholdAge))
	f.IntVar(&cfg.BlockSyncConcurrency, "compactor.block-sync-concurrency", 20, "Number of Go routines to use when syncing block index and chunks files from the long term storage.")
	f.IntVar(&cfg.MetaSyncConcurrency, "compactor.meta-sync-concurrency", 20, "Number of Go routines to use when syncing block meta files from the long term storage.")
	f.StringVar(&cfg.DataDir, "compactor.data-dir", "./data", "Data directory in which to cache blocks and process compactions.")
	f.DurationVar(&cfg.CompactionInterval, "compactor.compaction-interval", time.Hour, "The frequency at which the compaction runs.")
	f.IntVar(&cfg.CompactionRetries, "compactor.compaction-retries", 3, "How many times to retry a failed compaction during a single compaction interval.")
	f.IntVar(&cfg.CompactionConcurrency, "compactor.compaction-concurrency", 1, "Max number of concurrent compactions running.")
	f.BoolVar(&cfg.ShardingEnabled, "compactor.sharding-enabled", false, "Shard tenants across multiple compactor instances. Sharding is required if you run multiple compactor instances, in order to coordinate compactions and avoid race conditions leading to the same tenant blocks being compacted simultaneously by different instances.")
	f.DurationVar(&cfg.DeletionDelay, "compactor.deletion-delay", 12*time.Hour, "Time before a block marked for deletion is deleted from the bucket. "+
		"If not 0, blocks will be marked for deletion and the compactor component will delete blocks marked for deletion from the bucket. "+
		"If delete-delay is 0, blocks will be deleted straight away. Note that deleting blocks immediately can cause query failures "+
		"if the store gateway still has the block loaded, or if the compactor is ignoring the deletion because it's compacting the block at the same time.")
}

// Compactor is a multi-tenant TSDB blocks compactor based on Thanos.
type Compactor struct {
	services.Service

	compactorCfg Config
	storageCfg   cortex_tsdb.Config
	logger       log.Logger
	parentLogger log.Logger
	registerer   prometheus.Registerer

	// Function that creates the bucket client and the TSDB compactor using the context.
	// Useful for injecting mock objects from tests.
	createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error)

	// Users scanner, used to discover users from the bucket.
	usersScanner *UsersScanner

	// Blocks cleaner is responsible for hard-deleting blocks marked for deletion.
	blocksCleaner *BlocksCleaner

	// Underlying compactor used to compact TSDB blocks.
	tsdbCompactor tsdb.Compactor

	// Client used to run operations on the bucket storing blocks.
	bucketClient objstore.Bucket

	// Ring used for sharding compactions.
	ringLifecycler         *ring.Lifecycler
	ring                   *ring.Ring
	ringSubservices        *services.Manager
	ringSubservicesWatcher *services.FailureWatcher

	// Metrics.
	compactionRunsStarted     prometheus.Counter
	compactionRunsCompleted   prometheus.Counter
	compactionRunsFailed      prometheus.Counter
	compactionRunsLastSuccess prometheus.Gauge
	blocksMarkedForDeletion   prometheus.Counter
	garbageCollectedBlocks    prometheus.Counter

	// TSDB syncer metrics.
	syncerMetrics *syncerMetrics
}

// NewCompactor makes a new Compactor.
func NewCompactor(compactorCfg Config, storageCfg cortex_tsdb.Config, logger log.Logger, registerer prometheus.Registerer) (*Compactor, error) {
	createBucketClientAndTsdbCompactor := func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error) {
		bucketClient, err := cortex_tsdb.NewBucketClient(ctx, storageCfg, "compactor", logger, registerer)
		if err != nil {
			return nil, nil, errors.Wrap(err, "failed to create the bucket client")
		}

		compactor, err := tsdb.NewLeveledCompactor(ctx, registerer, logger, compactorCfg.BlockRanges.ToMilliseconds(), downsample.NewPool())
		return bucketClient, compactor, err
	}

	cortexCompactor, err := newCompactor(compactorCfg, storageCfg, logger, registerer, createBucketClientAndTsdbCompactor)
	if err != nil {
		return nil, errors.Wrap(err, "failed to create Cortex blocks compactor")
	}

	return cortexCompactor, nil
}

func newCompactor(
	compactorCfg Config,
	storageCfg cortex_tsdb.Config,
	logger log.Logger,
	registerer prometheus.Registerer,
	createBucketClientAndTsdbCompactor func(ctx context.Context) (objstore.Bucket, tsdb.Compactor, error),
) (*Compactor, error) {
	c := &Compactor{
		compactorCfg:                       compactorCfg,
		storageCfg:                         storageCfg,
		parentLogger:                       logger,
		logger:                             log.With(logger, "component", "compactor"),
		registerer:                         registerer,
		syncerMetrics:                      newSyncerMetrics(registerer),
		createBucketClientAndTsdbCompactor: createBucketClientAndTsdbCompactor,

		compactionRunsStarted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
			Name: "cortex_compactor_runs_started_total",
			Help: "Total number of compaction runs started.",
		}),
		compactionRunsCompleted: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
			Name: "cortex_compactor_runs_completed_total",
			Help: "Total number of compaction runs successfully completed.",
		}),
		compactionRunsFailed: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
			Name: "cortex_compactor_runs_failed_total",
			Help: "Total number of compaction runs failed.",
		}),
		compactionRunsLastSuccess: promauto.With(registerer).NewGauge(prometheus.GaugeOpts{
			Name: "cortex_compactor_last_successful_run_timestamp_seconds",
			Help: "Unix timestamp of the last successful compaction run.",
		}),
		blocksMarkedForDeletion: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
			Name: "cortex_compactor_blocks_marked_for_deletion_total",
			Help: "Total number of blocks marked for deletion in compactor.",
		}),
		garbageCollectedBlocks: promauto.With(registerer).NewCounter(prometheus.CounterOpts{
			Name: "cortex_compactor_garbage_collected_blocks_total",
			Help: "Total number of blocks marked for deletion by compactor.",
		}),
	}

	c.Service = services.NewBasicService(c.starting, c.running, c.stopping)

	return c, nil
}

// Start the compactor.
func (c *Compactor) starting(ctx context.Context) error {
	var err error

	// Create bucket client and compactor.
	c.bucketClient, c.tsdbCompactor, err = c.createBucketClientAndTsdbCompactor(ctx)
	if err != nil {
		return errors.Wrap(err, "failed to initialize compactor objects")
	}

	// Create the users scanner.
	c.usersScanner = NewUsersScanner(c.bucketClient, c.ownUser, c.parentLogger)

	// Initialize the compactors ring if sharding is enabled.
	if c.compactorCfg.ShardingEnabled {
		lifecyclerCfg := c.compactorCfg.ShardingRing.ToLifecyclerConfig()
		c.ringLifecycler, err = ring.NewLifecycler(lifecyclerCfg, ring.NewNoopFlushTransferer(), "compactor", ring.CompactorRingKey, false, c.registerer)
		if err != nil {
			return errors.Wrap(err, "unable to initialize compactor ring lifecycler")
		}

		c.ring, err = ring.New(lifecyclerCfg.RingConfig, "compactor", ring.CompactorRingKey, c.registerer)
		if err != nil {
			return errors.Wrap(err, "unable to initialize compactor ring")
		}

		c.ringSubservices, err = services.NewManager(c.ringLifecycler, c.ring)
		if err == nil {
			c.ringSubservicesWatcher = services.NewFailureWatcher()
			c.ringSubservicesWatcher.WatchManager(c.ringSubservices)

			err = services.StartManagerAndAwaitHealthy(ctx, c.ringSubservices)
		}
		if err != nil {
			return errors.Wrap(err, "unable to start compactor ring dependencies")
		}

		// If sharding is enabled we should wait until this instance is ACTIVE within
		// the ring. This MUST be done before starting any other component depending
		// on the users scanner, because the users scanner depends on the ring (to
		// check whether a user belongs to this shard or not).
		level.Info(c.logger).Log("msg", "waiting until compactor is ACTIVE in the ring")
		if err := ring.WaitInstanceState(ctx, c.ring, c.ringLifecycler.ID, ring.ACTIVE); err != nil {
			return err
		}
		level.Info(c.logger).Log("msg", "compactor is ACTIVE in the ring")
	}

	// Create the blocks cleaner (service).
	c.blocksCleaner = NewBlocksCleaner(BlocksCleanerConfig{
		DataDir:             c.compactorCfg.DataDir,
		MetaSyncConcurrency: c.compactorCfg.MetaSyncConcurrency,
		DeletionDelay:       c.compactorCfg.DeletionDelay,
		CleanupInterval:     util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.05),
	}, c.bucketClient, c.usersScanner, c.parentLogger, c.registerer)

	// Ensure an initial cleanup occurred before starting the compactor.
	if err := services.StartAndAwaitRunning(ctx, c.blocksCleaner); err != nil {
		// The ring subservices are only created when sharding is enabled.
		if c.ringSubservices != nil {
			c.ringSubservices.StopAsync()
		}
		return errors.Wrap(err, "failed to start the blocks cleaner")
	}

	return nil
}
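
// stopping stops the blocks cleaner and, when sharding is enabled, the ring subservices.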
func (c *Compactor) stopping(_ error) error {
	ctx := context.Background()

	services.StopAndAwaitTerminated(ctx, c.blocksCleaner) //nolint:errcheck
	if c.ringSubservices != nil {
		return services.StopManagerAndAwaitStopped(ctx, c.ringSubservices)
	}
	return nil
}
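
// running runs compactions on each interval until the context is canceled or a ring subservice fails.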
func (c *Compactor) running(ctx context.Context) error {
	// Run an initial compaction before starting the interval.
	c.compactUsersWithRetries(ctx)

	ticker := time.NewTicker(util.DurationWithJitter(c.compactorCfg.CompactionInterval, 0.05))
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			c.compactUsersWithRetries(ctx)
		case <-ctx.Done():
			return nil
		case err := <-c.ringSubservicesWatcher.Chan():
			return errors.Wrap(err, "compactor subservice failed")
		}
	}
}
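
// compactUsersWithRetries runs a single compaction pass over all discovered users,
// retrying a failed pass with a backoff up to the configured number of retries.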
func (c *Compactor) compactUsersWithRetries(ctx context.Context) {
	retries := util.NewBackoff(ctx, util.BackoffConfig{
		MinBackoff: c.compactorCfg.retryMinBackoff,
		MaxBackoff: c.compactorCfg.retryMaxBackoff,
		MaxRetries: c.compactorCfg.CompactionRetries,
	})

	c.compactionRunsStarted.Inc()

	for retries.Ongoing() {
		if err := c.compactUsers(ctx); err == nil {
			c.compactionRunsCompleted.Inc()
			c.compactionRunsLastSuccess.SetToCurrentTime()
			return
		} else if errors.Is(err, context.Canceled) {
			return
		}

		retries.Wait()
	}

	c.compactionRunsFailed.Inc()
}
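
// compactUsers discovers the users in the bucket and compacts the blocks of each user
// owned by this compactor instance.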
func (c *Compactor) compactUsers(ctx context.Context) error {
	level.Info(c.logger).Log("msg", "discovering users from bucket")
	users, err := c.discoverUsers(ctx)
	if err != nil {
		level.Error(c.logger).Log("msg", "failed to discover users from bucket", "err", err)
		return errors.Wrap(err, "failed to discover users from bucket")
	}
	level.Info(c.logger).Log("msg", "discovered users from bucket", "users", len(users))

	errs := tsdb_errors.MultiError{}

	for _, userID := range users {
		// Ensure the context has not been canceled (i.e. the compactor shutdown has been triggered).
		if ctx.Err() != nil {
			level.Info(c.logger).Log("msg", "interrupting compaction of user blocks", "err", ctx.Err())
			return ctx.Err()
		}

		// If sharding is enabled, ensure the user ID belongs to our shard.
		if c.compactorCfg.ShardingEnabled {
			if owned, err := c.ownUser(userID); err != nil {
				level.Warn(c.logger).Log("msg", "unable to check if user is owned by this shard", "user", userID, "err", err)
				continue
			} else if !owned {
				level.Debug(c.logger).Log("msg", "skipping user because it is not owned by this shard", "user", userID)
				continue
			}
		}

		level.Info(c.logger).Log("msg", "starting compaction of user blocks", "user", userID)

		if err = c.compactUser(ctx, userID); err != nil {
			level.Error(c.logger).Log("msg", "failed to compact user blocks", "user", userID, "err", err)
			errs.Add(errors.Wrapf(err, "failed to compact user blocks (user: %s)", userID))
			continue
		}

		level.Info(c.logger).Log("msg", "successfully compacted user blocks", "user", userID)
	}

	return errs.Err()
}
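
// compactUser compacts the blocks of a single tenant, using a meta fetcher, syncer and
// bucket compactor scoped to the user's own bucket prefix.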
func (c *Compactor) compactUser(ctx context.Context, userID string) error {
	bucket := cortex_tsdb.NewUserBucketClient(userID, c.bucketClient)

	reg := prometheus.NewRegistry()
	defer c.syncerMetrics.gatherThanosSyncerMetrics(reg)

	ulogger := util.WithUserID(userID, c.logger)

	// Filters out duplicate blocks that can be formed from two or more overlapping
	// blocks that fully submatch the source blocks of the older blocks.
	deduplicateBlocksFilter := block.NewDeduplicateFilter()

	// While fetching blocks, we filter out blocks that were marked for deletion by using IgnoreDeletionMarkFilter.
	// The delay of DeletionDelay/2 is added to ensure we fetch blocks that are meant to be deleted but do not have a replacement yet.
	ignoreDeletionMarkFilter := block.NewIgnoreDeletionMarkFilter(ulogger, bucket, time.Duration(c.compactorCfg.DeletionDelay.Seconds()/2)*time.Second)

	fetcher, err := block.NewMetaFetcher(
		ulogger,
		c.compactorCfg.MetaSyncConcurrency,
		bucket,
		// The fetcher stores cached metas in the "meta-syncer/" sub directory, but we
		// prefix it with "compactor-meta-" in order to guarantee no clashing with the
		// directory used by the Thanos Syncer, whatever the user ID is.
		path.Join(c.compactorCfg.DataDir, "compactor-meta-"+userID),
		reg,
		// List of filters to apply (order matters).
		[]block.MetadataFilter{
			// Remove the ingester ID because we don't shard blocks anymore, while still
			// honoring the shard ID if sharding was done in the past.
			NewLabelRemoverFilter([]string{cortex_tsdb.IngesterIDExternalLabel}),
			block.NewConsistencyDelayMetaFilter(ulogger, c.compactorCfg.ConsistencyDelay, reg),
			ignoreDeletionMarkFilter,
			deduplicateBlocksFilter,
		},
		nil,
	)
	if err != nil {
		return err
	}

	syncer, err := compact.NewSyncer(
		ulogger,
		reg,
		bucket,
		fetcher,
		deduplicateBlocksFilter,
		ignoreDeletionMarkFilter,
		c.blocksMarkedForDeletion,
		c.garbageCollectedBlocks,
		c.compactorCfg.BlockSyncConcurrency,
	)
	if err != nil {
		return errors.Wrap(err, "failed to create syncer")
	}

	grouper := compact.NewDefaultGrouper(
		ulogger,
		bucket,
		false, // Do not accept malformed indexes.
		true,  // Enable vertical compaction.
		reg,
		c.blocksMarkedForDeletion,
		c.garbageCollectedBlocks,
	)

	compactor, err := compact.NewBucketCompactor(
		ulogger,
		syncer,
		grouper,
		c.tsdbCompactor,
		path.Join(c.compactorCfg.DataDir, "compact"),
		bucket,
		c.compactorCfg.CompactionConcurrency,
	)
	if err != nil {
		return errors.Wrap(err, "failed to create bucket compactor")
	}

	if err := compactor.Compact(ctx); err != nil {
		return errors.Wrap(err, "compaction")
	}

	return nil
}
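
// discoverUsers returns the list of users (tenants) found in the bucket root.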
func (c *Compactor) discoverUsers(ctx context.Context) ([]string, error) {
	var users []string

	err := c.bucketClient.Iter(ctx, "", func(entry string) error {
		users = append(users, strings.TrimSuffix(entry, "/"))
		return nil
	})

	return users, err
}
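
// ownUser returns whether the given user is owned by this compactor instance, based on
// the FNV-1a hash of the user ID looked up in the compactors ring. When sharding is
// disabled, every user is owned.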
func (c *Compactor) ownUser(userID string) (bool, error) {
	// Always owned if sharding is disabled.
	if !c.compactorCfg.ShardingEnabled {
		return true, nil
	}

	// Hash the user ID.
	hasher := fnv.New32a()
	_, _ = hasher.Write([]byte(userID))
	userHash := hasher.Sum32()

	// Check whether this compactor instance owns the user.
	rs, err := c.ring.Get(userHash, ring.Read, []ring.IngesterDesc{})
	if err != nil {
		return false, err
	}

	if len(rs.Ingesters) != 1 {
		return false, fmt.Errorf("unexpected number of compactors in the shard (expected 1, got %d)", len(rs.Ingesters))
	}

	return rs.Ingesters[0].Addr == c.ringLifecycler.Addr, nil
}