package ingester
import (
"context"
"flag"
"fmt"
"net/http"
"os"
"path/filepath"
"sync"
"time"
"github.com/go-kit/log/level"
"github.com/grafana/dskit/concurrency"
"github.com/grafana/dskit/modules"
"github.com/grafana/dskit/ring"
"github.com/grafana/dskit/services"
"github.com/grafana/dskit/tenant"
"github.com/pkg/errors"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/common/model"
"github.com/prometheus/prometheus/model/labels"
"google.golang.org/grpc/health/grpc_health_v1"
"github.com/grafana/loki/pkg/chunkenc"
"github.com/grafana/loki/pkg/ingester/client"
"github.com/grafana/loki/pkg/ingester/index"
"github.com/grafana/loki/pkg/iter"
"github.com/grafana/loki/pkg/logproto"
"github.com/grafana/loki/pkg/logql"
"github.com/grafana/loki/pkg/logql/syntax"
"github.com/grafana/loki/pkg/logqlmodel/stats"
"github.com/grafana/loki/pkg/runtime"
"github.com/grafana/loki/pkg/storage"
"github.com/grafana/loki/pkg/storage/chunk"
"github.com/grafana/loki/pkg/storage/chunk/fetcher"
"github.com/grafana/loki/pkg/storage/config"
index_stats "github.com/grafana/loki/pkg/storage/stores/index/stats"
"github.com/grafana/loki/pkg/usagestats"
"github.com/grafana/loki/pkg/util"
errUtil "github.com/grafana/loki/pkg/util"
util_log "github.com/grafana/loki/pkg/util/log"
"github.com/grafana/loki/pkg/util/wal"
"github.com/grafana/loki/pkg/validation"
)
const (
// RingKey is the key under which we store the ingesters ring in the KVStore.
RingKey = "ring"
)
// ErrReadOnly is returned when the ingester is shutting down and a push was
// attempted.
var (
ErrReadOnly = errors.New("Ingester is shutting down")
flushQueueLength = promauto.NewGauge(prometheus.GaugeOpts{
Name: "cortex_ingester_flush_queue_length",
Help: "The total number of series pending in the flush queue.",
})
compressionStats = usagestats.NewString("ingester_compression")
targetSizeStats = usagestats.NewInt("ingester_target_size_bytes")
walStats = usagestats.NewString("ingester_wal")
activeTenantsStats = usagestats.NewInt("ingester_active_tenants")
)
// Config for an ingester.
type Config struct {
LifecyclerConfig ring.LifecyclerConfig `yaml:"lifecycler,omitempty"`
// Config for transferring chunks.
MaxTransferRetries int `yaml:"max_transfer_retries,omitempty"`
ConcurrentFlushes int `yaml:"concurrent_flushes"`
FlushCheckPeriod time.Duration `yaml:"flush_check_period"`
FlushOpTimeout time.Duration `yaml:"flush_op_timeout"`
RetainPeriod time.Duration `yaml:"chunk_retain_period"`
MaxChunkIdle time.Duration `yaml:"chunk_idle_period"`
BlockSize int `yaml:"chunk_block_size"`
TargetChunkSize int `yaml:"chunk_target_size"`
ChunkEncoding string `yaml:"chunk_encoding"`
parsedEncoding chunkenc.Encoding `yaml:"-"` // placeholder for validated encoding
MaxChunkAge time.Duration `yaml:"max_chunk_age"`
AutoForgetUnhealthy bool `yaml:"autoforget_unhealthy"`
// Synchronization settings. Used to make sure that ingesters cut their chunks at the same moments.
SyncPeriod time.Duration `yaml:"sync_period"`
SyncMinUtilization float64 `yaml:"sync_min_utilization"`
MaxReturnedErrors int `yaml:"max_returned_stream_errors"`
// For testing, you can override the address and ID of this ingester.
ingesterClientFactory func(cfg client.Config, addr string) (client.HealthAndIngesterClient, error)
QueryStore bool `yaml:"-"`
QueryStoreMaxLookBackPeriod time.Duration `yaml:"query_store_max_look_back_period"`
WAL WALConfig `yaml:"wal,omitempty"`
ChunkFilterer chunk.RequestChunkFilterer `yaml:"-"`
// Optional wrapper that can be used to modify the behaviour of the ingester
Wrapper Wrapper `yaml:"-"`
IndexShards int `yaml:"index_shards"`
MaxDroppedStreams int `yaml:"max_dropped_streams"`
}
// RegisterFlags registers the flags.
func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
cfg.LifecyclerConfig.RegisterFlags(f, util_log.Logger)
cfg.WAL.RegisterFlags(f)
f.IntVar(&cfg.MaxTransferRetries, "ingester.max-transfer-retries", 0, "Number of times to try and transfer chunks before falling back to flushing. If set to 0 or negative value, transfers are disabled.")
f.IntVar(&cfg.ConcurrentFlushes, "ingester.concurrent-flushes", 32, "")
f.DurationVar(&cfg.FlushCheckPeriod, "ingester.flush-check-period", 30*time.Second, "")
f.DurationVar(&cfg.FlushOpTimeout, "ingester.flush-op-timeout", 10*time.Minute, "")
f.DurationVar(&cfg.RetainPeriod, "ingester.chunks-retain-period", 0, "")
f.DurationVar(&cfg.MaxChunkIdle, "ingester.chunks-idle-period", 30*time.Minute, "")
f.IntVar(&cfg.BlockSize, "ingester.chunks-block-size", 256*1024, "")
f.IntVar(&cfg.TargetChunkSize, "ingester.chunk-target-size", 1572864, "") // 1.5 MB
f.StringVar(&cfg.ChunkEncoding, "ingester.chunk-encoding", chunkenc.EncGZIP.String(), fmt.Sprintf("The algorithm to use for compressing chunks. (%s)", chunkenc.SupportedEncoding()))
f.DurationVar(&cfg.SyncPeriod, "ingester.sync-period", 0, "How often to cut chunks to synchronize ingesters.")
f.Float64Var(&cfg.SyncMinUtilization, "ingester.sync-min-utilization", 0, "Minimum utilization of chunk when doing synchronization.")
f.IntVar(&cfg.MaxReturnedErrors, "ingester.max-ignored-stream-errors", 10, "Maximum number of ignored stream errors to return. 0 to return all errors.")
f.DurationVar(&cfg.MaxChunkAge, "ingester.max-chunk-age", 2*time.Hour, "Maximum chunk age before flushing.")
f.DurationVar(&cfg.QueryStoreMaxLookBackPeriod, "ingester.query-store-max-look-back-period", 0, "How far back should an ingester be allowed to query the store for data, for use only with boltdb-shipper/tsdb index and filesystem object store. -1 for infinite.")
f.BoolVar(&cfg.AutoForgetUnhealthy, "ingester.autoforget-unhealthy", false, "Enable to remove unhealthy ingesters from the ring after `ring.kvstore.heartbeat_timeout`")
f.IntVar(&cfg.IndexShards, "ingester.index-shards", index.DefaultIndexShards, "Shard factor used in the ingesters for the in process reverse index. This MUST be evenly divisible by ALL schema shard factors or Loki will not start.")
f.IntVar(&cfg.MaxDroppedStreams, "ingester.tailer.max-dropped-streams", 10, "Maximum number of dropped streams to keep in memory during tailing")
}
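// A minimal wiring sketch (hypothetical caller, shown for illustration only):
//
//	var cfg Config
//	cfg.RegisterFlags(flag.CommandLine)
//	flag.Parse()
//	if err := cfg.Validate(); err != nil {
//		// handle the error
//	}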
func (cfg *Config) Validate() error {
enc, err := chunkenc.ParseEncoding(cfg.ChunkEncoding)
if err != nil {
return err
}
cfg.parsedEncoding = enc
if err = cfg.WAL.Validate(); err != nil {
return err
}
if cfg.MaxTransferRetries > 0 && cfg.WAL.Enabled {
return errors.New("the use of the write ahead log (WAL) is incompatible with chunk transfers. It's suggested to use the WAL. Please try setting ingester.max-transfer-retries to 0 to disable transfers")
}
if cfg.IndexShards <= 0 {
return fmt.Errorf("invalid ingester index shard factor: %d", cfg.IndexShards)
}
return nil
}
type Wrapper interface {
Wrap(wrapped Interface) Interface
}
// ChunkStore is the interface we need to store chunks.
type ChunkStore interface {
Put(ctx context.Context, chunks []chunk.Chunk) error
SelectLogs(ctx context.Context, req logql.SelectLogParams) (iter.EntryIterator, error)
SelectSamples(ctx context.Context, req logql.SelectSampleParams) (iter.SampleIterator, error)
GetChunkRefs(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) ([][]chunk.Chunk, []*fetcher.Fetcher, error)
GetSchemaConfigs() []config.PeriodConfig
Stats(ctx context.Context, userID string, from, through model.Time, matchers ...*labels.Matcher) (*index_stats.Stats, error)
}
// Interface is an interface for the Ingester
type Interface interface {
services.Service
logproto.IngesterServer
logproto.PusherServer
logproto.QuerierServer
logproto.StreamDataServer
CheckReady(ctx context.Context) error
FlushHandler(w http.ResponseWriter, _ *http.Request)
GetOrCreateInstance(instanceID string) (*instance, error)
// deprecated
LegacyShutdownHandler(w http.ResponseWriter, r *http.Request)
ShutdownHandler(w http.ResponseWriter, r *http.Request)
}
// Ingester builds chunks for incoming log streams.
type Ingester struct {
services.Service
cfg Config
clientConfig client.Config
tenantConfigs *runtime.TenantConfigs
shutdownMtx sync.Mutex // Allows processes to grab a lock and prevent a shutdown
instancesMtx sync.RWMutex
instances map[string]*instance
readonly bool
lifecycler *ring.Lifecycler
lifecyclerWatcher *services.FailureWatcher
store ChunkStore
periodicConfigs []config.PeriodConfig
loopDone sync.WaitGroup
loopQuit chan struct{}
tailersQuit chan struct{}
// One queue per flush thread. Fingerprint is used to
// pick a queue.
flushQueues []*util.PriorityQueue
flushQueuesDone sync.WaitGroup
limiter *Limiter
// Denotes whether the ingester should flush on shutdown.
// Currently only used by the WAL to signal when the disk is full.
flushOnShutdownSwitch *OnceSwitch
// Flag for whether stopping the ingester service should also terminate the
// loki process.
// This is set when calling the shutdown handler.
terminateOnShutdown bool
// Only used by WAL & flusher to coordinate backpressure during replay.
replayController *replayController
metrics *ingesterMetrics
wal WAL
chunkFilter chunk.RequestChunkFilterer
}
// New makes a new Ingester.
func New(cfg Config, clientConfig client.Config, store ChunkStore, limits *validation.Overrides, configs *runtime.TenantConfigs, registerer prometheus.Registerer) (*Ingester, error) {
if cfg.ingesterClientFactory == nil {
cfg.ingesterClientFactory = client.New
}
compressionStats.Set(cfg.ChunkEncoding)
targetSizeStats.Set(int64(cfg.TargetChunkSize))
walStats.Set("disabled")
if cfg.WAL.Enabled {
walStats.Set("enabled")
}
metrics := newIngesterMetrics(registerer)
i := &Ingester{
cfg: cfg,
clientConfig: clientConfig,
tenantConfigs: configs,
instances: map[string]*instance{},
store: store,
periodicConfigs: store.GetSchemaConfigs(),
loopQuit: make(chan struct{}),
flushQueues: make([]*util.PriorityQueue, cfg.ConcurrentFlushes),
tailersQuit: make(chan struct{}),
metrics: metrics,
flushOnShutdownSwitch: &OnceSwitch{},
terminateOnShutdown: false,
}
i.replayController = newReplayController(metrics, cfg.WAL, &replayFlusher{i})
if cfg.WAL.Enabled {
if err := os.MkdirAll(cfg.WAL.Dir, os.ModePerm); err != nil {
// Best effort try to make path absolute for easier debugging.
path, _ := filepath.Abs(cfg.WAL.Dir)
if path == "" {
path = cfg.WAL.Dir
}
return nil, fmt.Errorf("creating WAL folder at %q: %w", path, err)
}
}
wal, err := newWAL(cfg.WAL, registerer, metrics, newIngesterSeriesIter(i))
if err != nil {
return nil, err
}
i.wal = wal
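// The lifecycler flushes on shutdown when the WAL is disabled, or when the WAL is
// explicitly configured to flush on shutdown (cfg.WAL.FlushOnShutdown).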
i.lifecycler, err = ring.NewLifecycler(cfg.LifecyclerConfig, i, "ingester", RingKey, !cfg.WAL.Enabled || cfg.WAL.FlushOnShutdown, util_log.Logger, prometheus.WrapRegistererWithPrefix("cortex_", registerer))
if err != nil {
return nil, err
}
i.lifecyclerWatcher = services.NewFailureWatcher()
i.lifecyclerWatcher.WatchService(i.lifecycler)
// Now that the lifecycler has been created, we can create the limiter
// which depends on it.
i.limiter = NewLimiter(limits, metrics, i.lifecycler, cfg.LifecyclerConfig.RingConfig.ReplicationFactor)
i.Service = services.NewBasicService(i.starting, i.running, i.stopping)
i.setupAutoForget()
if i.cfg.ChunkFilterer != nil {
i.SetChunkFilterer(i.cfg.ChunkFilterer)
}
return i, nil
}
func (i *Ingester) SetChunkFilterer(chunkFilter chunk.RequestChunkFilterer) {
i.chunkFilter = chunkFilter
}
// setupAutoForget watches the ring status when `AutoForgetUnhealthy` is enabled.
// When enabled, unhealthy ingesters that have not heartbeated within `ring.kvstore.heartbeat_timeout` are removed from the ring every `HeartbeatPeriod`.
func (i *Ingester) setupAutoForget() {
if !i.cfg.AutoForgetUnhealthy {
return
}
go func() {
ctx := context.Background()
err := i.Service.AwaitRunning(ctx)
if err != nil {
level.Error(util_log.Logger).Log("msg", fmt.Sprintf("autoforget received error %s, autoforget is disabled", err.Error()))
return
}
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("autoforget is enabled and will remove unhealthy instances from the ring after %v with no heartbeat", i.cfg.LifecyclerConfig.RingConfig.HeartbeatTimeout))
ticker := time.NewTicker(i.cfg.LifecyclerConfig.HeartbeatPeriod)
defer ticker.Stop()
var forgetList []string
for range ticker.C {
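// Each tick runs a compare-and-swap against the ring key. Returning (nil, false, nil)
// from the callback leaves the ring untouched; returning the modified descriptor asks
// the KV store to write it back.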
err := i.lifecycler.KVStore.CAS(ctx, RingKey, func(in interface{}) (out interface{}, retry bool, err error) {
forgetList = forgetList[:0]
if in == nil {
return nil, false, nil
}
ringDesc, ok := in.(*ring.Desc)
if !ok {
level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("autoforget saw a KV store value that was not `ring.Desc`, got `%T`", in))
return nil, false, nil
}
for id, ingester := range ringDesc.Ingesters {
if !ingester.IsHealthy(ring.Reporting, i.cfg.LifecyclerConfig.RingConfig.HeartbeatTimeout, time.Now()) {
if i.lifecycler.ID == id {
level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("autoforget has seen our ID `%s` as unhealthy in the ring, network may be partitioned, skip forgetting ingesters this round", id))
return nil, false, nil
}
forgetList = append(forgetList, id)
}
}
if len(forgetList) == len(ringDesc.Ingesters)-1 {
level.Warn(util_log.Logger).Log("msg", fmt.Sprintf("autoforget has seen %d unhealthy ingesters out of %d, network may be partitioned, skip forgetting ingesters this round", len(forgetList), len(ringDesc.Ingesters)))
forgetList = forgetList[:0]
return nil, false, nil
}
if len(forgetList) > 0 {
for _, id := range forgetList {
ringDesc.RemoveIngester(id)
}
return ringDesc, true, nil
}
return nil, false, nil
})
if err != nil {
level.Warn(util_log.Logger).Log("msg", err)
continue
}
for _, id := range forgetList {
level.Info(util_log.Logger).Log("msg", fmt.Sprintf("autoforget removed ingester %v from the ring because it was not healthy after %v", id, i.cfg.LifecyclerConfig.RingConfig.HeartbeatTimeout))
}
i.metrics.autoForgetUnhealthyIngestersTotal.Add(float64(len(forgetList)))
}
}()
}
func (i *Ingester) starting(ctx context.Context) error {
if i.cfg.WAL.Enabled {
start := time.Now()
// Ignore retain period during wal replay.
oldRetain := i.cfg.RetainPeriod
i.cfg.RetainPeriod = 0
// Disable the in process stream limit checks while replaying the WAL.
// It is re-enabled in the recover's Close() method.
i.limiter.DisableForWALReplay()
recoverer := newIngesterRecoverer(i)
i.metrics.walReplayActive.Set(1)
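// endReplay is wrapped in a sync.Once so it can be called both explicitly (before
// starting the WAL) and via defer without running the cleanup twice.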
endReplay := func() func() {
var once sync.Once
return func() {
once.Do(func() {
level.Info(util_log.Logger).Log("msg", "closing recoverer")
recoverer.Close()
elapsed := time.Since(start)
i.metrics.walReplayActive.Set(0)
i.metrics.walReplayDuration.Set(elapsed.Seconds())
i.cfg.RetainPeriod = oldRetain
level.Info(util_log.Logger).Log("msg", "WAL recovery finished", "time", elapsed.String())
})
}
}()
defer endReplay()
level.Info(util_log.Logger).Log("msg", "recovering from checkpoint")
checkpointReader, checkpointCloser, err := newCheckpointReader(i.cfg.WAL.Dir)
if err != nil {
return err
}
defer checkpointCloser.Close()
checkpointRecoveryErr := RecoverCheckpoint(checkpointReader, recoverer)
if checkpointRecoveryErr != nil {
i.metrics.walCorruptionsTotal.WithLabelValues(walTypeCheckpoint).Inc()
level.Error(util_log.Logger).Log(
"msg",
`Recovered from checkpoint with errors. Some streams were likely not recovered due to WAL checkpoint file corruptions (or WAL file deletions while Loki is running). No administrator action is needed and data loss is only a possibility if more than (replication factor / 2 + 1) ingesters suffer from this.`,
"elapsed", time.Since(start).String(),
)
}
level.Info(util_log.Logger).Log(
"msg", "recovered WAL checkpoint recovery finished",
"elapsed", time.Since(start).String(),
"errors", checkpointRecoveryErr != nil,
)
level.Info(util_log.Logger).Log("msg", "recovering from WAL")
segmentReader, segmentCloser, err := wal.NewWalReader(i.cfg.WAL.Dir, -1)
if err != nil {
return err
}
defer segmentCloser.Close()
segmentRecoveryErr := RecoverWAL(segmentReader, recoverer)
if segmentRecoveryErr != nil {
i.metrics.walCorruptionsTotal.WithLabelValues(walTypeSegment).Inc()
level.Error(util_log.Logger).Log(
"msg",
"Recovered from WAL segments with errors. Some streams and/or entries were likely not recovered due to WAL segment file corruptions (or WAL file deletions while Loki is running). No administrator action is needed and data loss is only a possibility if more than (replication factor / 2 + 1) ingesters suffer from this.",
"elapsed", time.Since(start).String(),
)
}
level.Info(util_log.Logger).Log(
"msg", "WAL segment recovery finished",
"elapsed", time.Since(start).String(),
"errors", segmentRecoveryErr != nil,
)
endReplay()
i.wal.Start()
}
i.InitFlushQueues()
// pass new context to lifecycler, so that it doesn't stop automatically when Ingester's service context is done
err := i.lifecycler.StartAsync(context.Background())
if err != nil {
return err
}
err = i.lifecycler.AwaitRunning(ctx)
if err != nil {
return err
}
// start our loop
i.loopDone.Add(1)
go i.loop()
return nil
}
func (i *Ingester) running(ctx context.Context) error {
var serviceError error
select {
// wait until service is asked to stop
case <-ctx.Done():
// stop
case err := <-i.lifecyclerWatcher.Chan():
serviceError = fmt.Errorf("lifecycler failed: %w", err)
}
// close tailers before stopping our loop
close(i.tailersQuit)
for _, instance := range i.getInstances() {
instance.closeTailers()
}
close(i.loopQuit)
i.loopDone.Wait()
return serviceError
}
// Called after running exits, when Ingester transitions to Stopping state.
// At this point, loop no longer runs, but flushers are still running.
func (i *Ingester) stopping(_ error) error {
i.stopIncomingRequests()
var errs errUtil.MultiError
errs.Add(i.wal.Stop())
if i.flushOnShutdownSwitch.Get() {
i.lifecycler.SetFlushOnShutdown(true)
}
errs.Add(services.StopAndAwaitTerminated(context.Background(), i.lifecycler))
// Normally, flushers are stopped via lifecycler (in transferOut), but if lifecycler fails,
// we better stop them.
for _, flushQueue := range i.flushQueues {
flushQueue.Close()
}
i.flushQueuesDone.Wait()
// In case the flag to terminate on shutdown is set we need to mark the
// ingester service as "failed", so Loki will shut down entirely.
// The module manager logs the failure `modules.ErrStopProcess` in a special way.
if i.terminateOnShutdown && errs.Err() == nil {
return modules.ErrStopProcess
}
return errs.Err()
}
func (i *Ingester) loop() {
defer i.loopDone.Done()
flushTicker := time.NewTicker(i.cfg.FlushCheckPeriod)
defer flushTicker.Stop()
for {
select {
case <-flushTicker.C:
i.sweepUsers(false, true)
case <-i.loopQuit:
return
}
}
}
// LegacyShutdownHandler triggers the following set of operations in order:
// - Change the state of ring to stop accepting writes.
// - Flush all the chunks.
//
// Note: This handler does not trigger a termination of the Loki process,
// despite its name. Instead, the ingester service is stopped, so an external
// source can trigger a safe termination through a signal to the process.
// The handler is deprecated and usage is discouraged. Use ShutdownHandler
// instead.
func (i *Ingester) LegacyShutdownHandler(w http.ResponseWriter, r *http.Request) {
level.Warn(util_log.Logger).Log("msg", "The handler /ingester/flush_shutdown is deprecated and usage is discouraged. Please use /ingester/shutdown?flush=true instead.")
originalState := i.lifecycler.FlushOnShutdown()
// We want to flush the chunks if transfer fails irrespective of original flag.
i.lifecycler.SetFlushOnShutdown(true)
_ = services.StopAndAwaitTerminated(context.Background(), i)
i.lifecycler.SetFlushOnShutdown(originalState)
w.WriteHeader(http.StatusNoContent)
}
// ShutdownHandler handles a graceful shutdown of the ingester service and
// termination of the Loki process.
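//
// For example, assuming Loki's default HTTP listen port of 3100, a flush-and-terminate
// shutdown could be requested with:
//
//	curl -X POST "http://localhost:3100/ingester/shutdown?flush=true&delete_ring_tokens=true&terminate=true"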
func (i *Ingester) ShutdownHandler(w http.ResponseWriter, r *http.Request) {
// Don't allow calling the shutdown handler multiple times
if i.State() != services.Running {
w.WriteHeader(http.StatusServiceUnavailable)
_, _ = w.Write([]byte("Ingester is stopping or already stopped."))
return
}
params := r.URL.Query()
doFlush := util.FlagFromValues(params, "flush", true)
doDeleteRingTokens := util.FlagFromValues(params, "delete_ring_tokens", false)
doTerminate := util.FlagFromValues(params, "terminate", true)
err := i.handleShutdown(doTerminate, doFlush, doDeleteRingTokens)
// Stopping the module will return the modules.ErrStopProcess error. This is
// needed so the Loki process is shut down completely.
if err == nil || err == modules.ErrStopProcess {
w.WriteHeader(http.StatusNoContent)
} else {
w.WriteHeader(http.StatusInternalServerError)
_, _ = w.Write([]byte(err.Error()))
}
}
// handleShutdown triggers the following operations:
// - Change the state of ring to stop accepting writes.
// - optional: Flush all the chunks.
// - optional: Delete ring tokens file
// - Unregister from KV store
// - optional: Terminate process (handled by service manager in loki.go)
func (i *Ingester) handleShutdown(terminate, flush, del bool) error {
i.lifecycler.SetFlushOnShutdown(flush)
i.lifecycler.SetClearTokensOnShutdown(del)
i.lifecycler.SetUnregisterOnShutdown(true)
i.terminateOnShutdown = terminate
return services.StopAndAwaitTerminated(context.Background(), i)
}
// Push implements logproto.Pusher.
func (i *Ingester) Push(ctx context.Context, req *logproto.PushRequest) (*logproto.PushResponse, error) {
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
} else if i.readonly {
return nil, ErrReadOnly
}
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return &logproto.PushResponse{}, err
}
err = instance.Push(ctx, req)
return &logproto.PushResponse{}, err
}
// GetStreamRates returns a response containing all streams and their current rate
// TODO: It might be nice for this to be human readable, eventually: Sort output and return labels, too?
func (i *Ingester) GetStreamRates(ctx context.Context, req *logproto.StreamRatesRequest) (*logproto.StreamRatesResponse, error) {
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
} else if i.readonly {
return nil, ErrReadOnly
}
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return &logproto.StreamRatesResponse{}, err
}
return instance.GetStreamRates(ctx, req)
}
func (i *Ingester) GetOrCreateInstance(instanceID string) (*instance, error) { //nolint:revive
inst, ok := i.getInstanceByID(instanceID)
if ok {
return inst, nil
}
i.instancesMtx.Lock()
defer i.instancesMtx.Unlock()
inst, ok = i.instances[instanceID]
if !ok {
var err error
inst, err = newInstance(&i.cfg, i.periodicConfigs, instanceID, i.limiter, i.tenantConfigs, i.wal, i.metrics, i.flushOnShutdownSwitch, i.chunkFilter)
if err != nil {
return nil, err
}
i.instances[instanceID] = inst
activeTenantsStats.Set(int64(len(i.instances)))
}
return inst, nil
}
// Query the ingesters for log streams matching a set of matchers.
func (i *Ingester) Query(req *logproto.QueryRequest, queryServer logproto.Querier_QueryServer) error {
// initialize stats collection for ingester queries.
_, ctx := stats.NewContext(queryServer.Context())
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return err
}
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return err
}
it, err := instance.Query(ctx, logql.SelectLogParams{QueryRequest: req})
if err != nil {
return err
}
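// If the ingester is permitted to query the store for this time range, also select
// matching logs from the store and merge them with the in-memory results.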
if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
storeReq := logql.SelectLogParams{QueryRequest: &logproto.QueryRequest{
Selector: req.Selector,
Direction: req.Direction,
Start: start,
End: end,
Limit: req.Limit,
Shards: req.Shards,
Deletes: req.Deletes,
}}
storeItr, err := i.store.SelectLogs(ctx, storeReq)
if err != nil {
errUtil.LogErrorWithContext(ctx, "closing iterator", it.Close)
return err
}
it = iter.NewMergeEntryIterator(ctx, []iter.EntryIterator{it, storeItr}, req.Direction)
}
defer errUtil.LogErrorWithContext(ctx, "closing iterator", it.Close)
// sendBatches uses -1 to specify no limit.
batchLimit := int32(req.Limit)
if batchLimit == 0 {
batchLimit = -1
}
return sendBatches(ctx, it, queryServer, batchLimit)
}
// QuerySample queries the ingesters for samples of series from logs matching a set of matchers.
func (i *Ingester) QuerySample(req *logproto.SampleQueryRequest, queryServer logproto.Querier_QuerySampleServer) error {
// initialize stats collection for ingester queries.
_, ctx := stats.NewContext(queryServer.Context())
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return err
}
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return err
}
it, err := instance.QuerySample(ctx, logql.SelectSampleParams{SampleQueryRequest: req})
if err != nil {
return err
}
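// As in Query above, merge store samples into the in-memory iterator when store
// querying is permitted for this range.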
if start, end, ok := buildStoreRequest(i.cfg, req.Start, req.End, time.Now()); ok {
storeReq := logql.SelectSampleParams{SampleQueryRequest: &logproto.SampleQueryRequest{
Start: start,
End: end,
Selector: req.Selector,
Shards: req.Shards,
Deletes: req.Deletes,
}}
storeItr, err := i.store.SelectSamples(ctx, storeReq)
if err != nil {
errUtil.LogErrorWithContext(ctx, "closing iterator", it.Close)
return err
}
it = iter.NewMergeSampleIterator(ctx, []iter.SampleIterator{it, storeItr})
}
defer errUtil.LogErrorWithContext(ctx, "closing iterator", it.Close)
return sendSampleBatches(ctx, it, queryServer)
}
// asyncStoreMaxLookBack returns a max look-back period only if the active index type is one of the async index stores, i.e. `boltdb-shipper` or `tsdb`.
// The max look-back is limited to the `from` time of the active async store config.
// If the previous periodic config also uses an async index type, its `from` time is used instead.
// This limits the look-back to the periods covered by async stores.
func (i *Ingester) asyncStoreMaxLookBack() time.Duration {
activePeriodicConfigIndex := config.ActivePeriodConfig(i.periodicConfigs)
activePeriodicConfig := i.periodicConfigs[activePeriodicConfigIndex]
if activePeriodicConfig.IndexType != config.BoltDBShipperType && activePeriodicConfig.IndexType != config.TSDBType {
return 0
}
startTime := activePeriodicConfig.From
if activePeriodicConfigIndex != 0 && (i.periodicConfigs[activePeriodicConfigIndex-1].IndexType == config.BoltDBShipperType ||
i.periodicConfigs[activePeriodicConfigIndex-1].IndexType == config.TSDBType) {
startTime = i.periodicConfigs[activePeriodicConfigIndex-1].From
}
maxLookBack := time.Since(startTime.Time.Time())
return maxLookBack
}
// GetChunkIDs is meant to be used only when using an async store like boltdb-shipper or tsdb.
func (i *Ingester) GetChunkIDs(ctx context.Context, req *logproto.GetChunkIDsRequest) (*logproto.GetChunkIDsResponse, error) {
orgID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
asyncStoreMaxLookBack := i.asyncStoreMaxLookBack()
if asyncStoreMaxLookBack == 0 {
return &logproto.GetChunkIDsResponse{}, nil
}
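// Clamp the request start so the ingester never asks the store for data older than
// the async store's look-back window.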
reqStart := req.Start
reqStart = adjustQueryStartTime(asyncStoreMaxLookBack, reqStart, time.Now())
// parse the request
start, end := errUtil.RoundToMilliseconds(reqStart, req.End)
matchers, err := syntax.ParseMatchers(req.Matchers)
if err != nil {
return nil, err
}
// get chunk references
chunksGroups, _, err := i.store.GetChunkRefs(ctx, orgID, start, end, matchers...)
if err != nil {
return nil, err
}
// todo (Callum) ingester should maybe store the whole schema config?
s := config.SchemaConfig{
Configs: i.periodicConfigs,
}
// build the response
resp := logproto.GetChunkIDsResponse{ChunkIDs: []string{}}
for _, chunks := range chunksGroups {
for _, chk := range chunks {
resp.ChunkIDs = append(resp.ChunkIDs, s.ExternalKey(chk.ChunkRef))
}
}
return &resp, nil
}
// Label returns the set of labels for the streams this ingester knows about.
func (i *Ingester) Label(ctx context.Context, req *logproto.LabelRequest) (*logproto.LabelResponse, error) {
userID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
instance, err := i.GetOrCreateInstance(userID)
if err != nil {
return nil, err
}
resp, err := instance.Label(ctx, req)
if err != nil {
return nil, err
}
if req.Start == nil {
return resp, nil
}
// Only continue if the active index type is one of async index store types or QueryStore flag is true.
asyncStoreMaxLookBack := i.asyncStoreMaxLookBack()
if asyncStoreMaxLookBack == 0 && !i.cfg.QueryStore {
return resp, nil
}
var cs storage.Store
var ok bool
if cs, ok = i.store.(storage.Store); !ok {
return resp, nil
}
maxLookBackPeriod := i.cfg.QueryStoreMaxLookBackPeriod
if asyncStoreMaxLookBack != 0 {
maxLookBackPeriod = asyncStoreMaxLookBack
}
// Adjust the start time based on QueryStoreMaxLookBackPeriod.
start := adjustQueryStartTime(maxLookBackPeriod, *req.Start, time.Now())
if start.After(*req.End) {
// The request is older than we are allowed to query the store, just return what we have.
return resp, nil
}
from, through := model.TimeFromUnixNano(start.UnixNano()), model.TimeFromUnixNano(req.End.UnixNano())
var storeValues []string
if req.Values {
storeValues, err = cs.LabelValuesForMetricName(ctx, userID, from, through, "logs", req.Name)
if err != nil {
return nil, err
}
} else {
storeValues, err = cs.LabelNamesForMetricName(ctx, userID, from, through, "logs")
if err != nil {
return nil, err
}
}
return &logproto.LabelResponse{
Values: errUtil.MergeStringLists(resp.Values, storeValues),
}, nil
}
// Series queries the ingester for log stream identifiers (label sets) matching a set of matchers
func (i *Ingester) Series(ctx context.Context, req *logproto.SeriesRequest) (*logproto.SeriesResponse, error) {
instanceID, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return nil, err
}
return instance.Series(ctx, req)
}
func (i *Ingester) GetStats(ctx context.Context, req *logproto.IndexStatsRequest) (*logproto.IndexStatsResponse, error) {
user, err := tenant.TenantID(ctx)
if err != nil {
return nil, err
}
instance, err := i.GetOrCreateInstance(user)
if err != nil {
return nil, err
}
matchers, err := syntax.ParseMatchers(req.Matchers)
if err != nil {
return nil, err
}
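// Query the in-memory instance and the store concurrently, then merge both stats
// responses below.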
type f func() (*logproto.IndexStatsResponse, error)
jobs := []f{
f(func() (*logproto.IndexStatsResponse, error) {
return instance.GetStats(ctx, req)
}),
f(func() (*logproto.IndexStatsResponse, error) {
return i.store.Stats(ctx, user, req.From, req.Through, matchers...)
}),
}
resps := make([]*logproto.IndexStatsResponse, len(jobs))
if err := concurrency.ForEachJob(
ctx,
len(jobs),
2,
func(ctx context.Context, idx int) error {
res, err := jobs[idx]()
resps[idx] = res
return err
},
); err != nil {
return nil, err
}
merged := index_stats.MergeStats(resps...)
return &merged, nil
}
// Watch implements grpc_health_v1.HealthCheck.
func (*Ingester) Watch(*grpc_health_v1.HealthCheckRequest, grpc_health_v1.Health_WatchServer) error {
return nil
}
// CheckReady is used to indicate to k8s when the ingesters are ready for
// the addition or removal of another ingester. Returns 204 when the ingester is
// ready, 500 otherwise.
func (i *Ingester) CheckReady(ctx context.Context) error {
if s := i.State(); s != services.Running && s != services.Stopping {
return fmt.Errorf("ingester not ready: %v", s)
}
return i.lifecycler.CheckReady(ctx)
}
func (i *Ingester) getInstanceByID(id string) (*instance, bool) {
i.instancesMtx.RLock()
defer i.instancesMtx.RUnlock()
inst, ok := i.instances[id]
return inst, ok
}
func (i *Ingester) getInstances() []*instance {
i.instancesMtx.RLock()
defer i.instancesMtx.RUnlock()
instances := make([]*instance, 0, len(i.instances))
for _, instance := range i.instances {
instances = append(instances, instance)
}
return instances
}
// Tail logs matching the given query.
func (i *Ingester) Tail(req *logproto.TailRequest, queryServer logproto.Querier_TailServer) error {
select {
case <-i.tailersQuit:
return errors.New("Ingester is stopping")
default:
}
instanceID, err := tenant.TenantID(queryServer.Context())
if err != nil {
return err
}
instance, err := i.GetOrCreateInstance(instanceID)
if err != nil {
return err
}
tailer, err := newTailer(instanceID, req.Query, queryServer, i.cfg.MaxDroppedStreams)
if err != nil {
return err
}
if err := instance.addNewTailer(queryServer.Context(), tailer); err != nil {
return err
}
tailer.loop()
return nil
}
// TailersCount returns the count of active tail requests for a user.
func (i *Ingester) TailersCount(ctx context.Context, in *logproto.TailersCountRequest) (*logproto.TailersCountResponse, error) {
instanceID, err := tenant.TenantID(ctx)