From e11ac855ba074308735271208931c20bc2ce0445 Mon Sep 17 00:00:00 2001 From: Patrick Oyarzun Date: Fri, 17 Jun 2022 17:22:25 -0500 Subject: [PATCH 01/63] helm: Add smoke-test job for `helm test` (#2061) * helm: Add smoke-test job for `helm test` * update continuous-test to r190 * Use temporary image with 4xx failure * Update golden record * Disable smoke test on enterprise deployments * Add "test passed" logging to the smoke-test * Update continuous-test image and move to end of values.yaml * Fix test build * Fix helm test * Fix helm lint * Update helm tests --- cmd/mimir-continuous-test/main.go | 2 +- .../charts/mimir-distributed/CHANGELOG.md | 1 + .../mimir-distributed/templates/_helpers.tpl | 10 +++ .../templates/smoke-test/smoke-test-job.yaml | 78 +++++++++++++++++++ .../helm/charts/mimir-distributed/values.yaml | 14 ++++ .../templates/smoke-test/smoke-test-job.yaml | 52 +++++++++++++ pkg/continuoustest/manager.go | 17 +++- pkg/continuoustest/manager_test.go | 10 ++- pkg/continuoustest/write_read_series.go | 2 +- 9 files changed, 177 insertions(+), 9 deletions(-) create mode 100644 operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml diff --git a/cmd/mimir-continuous-test/main.go b/cmd/mimir-continuous-test/main.go index e528bb47ec..3aa378ea21 100644 --- a/cmd/mimir-continuous-test/main.go +++ b/cmd/mimir-continuous-test/main.go @@ -72,7 +72,7 @@ func main() { } // Run continuous testing. - m := continuoustest.NewManager(cfg.Manager) + m := continuoustest.NewManager(cfg.Manager, logger) m.AddTest(continuoustest.NewWriteReadSeriesTest(cfg.WriteReadSeriesTest, client, logger, registry)) if err := m.Run(context.Background()); err != nil { level.Error(logger).Log("msg", "Failed to run continuous test", "err", err.Error()) diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index f5e1414f1e..b60fdb3327 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -15,6 +15,7 @@ Entries should include a reference to the Pull Request that introduced the chang * [CHANGE] Enable multi-tenancy by default. This means `multitenancy_enabled` is now `true` for both Mimir and Enterprise Metrics. Nginx will inject `X-Scope-OrgID=anonymous` header if the header is not present, ensuring backwards compatibility. #2117 * [CHANGE] Change default value for `blocks_storage.bucket_store.chunks_cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 +* [FEATURE] Add `mimir-continuous-test` in smoke-test mode. Use `helm test` to run a smoke test of the read + write path. * [ENHANCEMENT] Set the flag `ingester.ring.instance-availability-zone` to `zone-default` for ingesters. This is the first step of introducing multi-zone ingesters. #2114 * [ENHANCEMENT] Add `mimir.structuredConfig` for adding and modifing `mimir.config` values after template evaulation. It can be used to alter individual values in the configuration and it's structured YAML instead of text. #2100 * [ENHANCEMENT] Add `global.podAnnotations` which can add POD annotations to PODs directly controlled by this chart (mimir services, nginx). 
#2099 diff --git a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl index c5a5579446..095c847981 100644 --- a/operations/helm/charts/mimir-distributed/templates/_helpers.tpl +++ b/operations/helm/charts/mimir-distributed/templates/_helpers.tpl @@ -31,6 +31,16 @@ Calculate the infix for naming {{- if and .Values.enterprise.enabled .Values.enterprise.legacyLabels -}}enterprise-metrics{{- else -}}mimir{{- end -}} {{- end -}} +{{/* +Calculate the gateway url +*/}} +{{- define "mimir.gatewayUrl" -}} +{{- if .Values.enterprise.enabled -}} +http://{{ template "mimir.fullname" . }}-gateway.{{ .Release.Namespace }}.svc:{{ .Values.gateway.service.port | default (include "mimir.serverHttpListenPort" . ) }} +{{- else -}} +http://{{ template "mimir.fullname" . }}-nginx.{{ .Release.Namespace }}.svc:{{ .Values.nginx.service.port }} +{{- end -}} +{{- end -}} {{/* Create chart name and version as used by the chart label. diff --git a/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml b/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml new file mode 100644 index 0000000000..1efd0960f5 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/smoke-test/smoke-test-job.yaml @@ -0,0 +1,78 @@ +{{ if not .Values.enterprise.enabled -}} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" . "component" "smoke-test") }} + labels: + {{- include "mimir.labels" (dict "ctx" . "component" "smoke-test") | nindent 4 }} + annotations: + {{- if .Values.smoke_test.annotations }} + {{- toYaml .Values.smoke_test.annotations | nindent 4 }} + {{- end }} + "helm.sh/hook": test +spec: + backoffLimit: 5 + completions: 1 + parallelism: 1 + selector: + template: + metadata: + labels: + {{- include "mimir.podLabels" (dict "ctx" . "component" "smoke-test") | nindent 8 }} + {{- with .Values.smoke_test.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "mimir.serviceAccountName" . }} + {{- if .Values.smoke_test.priorityClassName }} + priorityClassName: {{ .Values.smoke_test.priorityClassName }} + {{- end }} + securityContext: + {{- toYaml .Values.smoke_test.securityContext | nindent 8 }} + {{- if .Values.image.pullSecrets }} + imagePullSecrets: + {{- range .Values.image.pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} + initContainers: + {{- toYaml .Values.smoke_test.initContainers | nindent 8 }} + containers: + - name: smoke-test + image: "{{ .Values.smoke_test.image.repository }}:{{ .Values.smoke_test.image.tag }}" + imagePullPolicy: {{ .Values.smoke_test.pullPolicy }} + args: + - "-tests.smoke-test" + - "-tests.write-endpoint={{ template "mimir.gatewayUrl" . }}" + - "-tests.read-endpoint={{ template "mimir.gatewayUrl" . }}/prometheus" + - "-tests.tenant-id={{ .Values.smoke_test.tenantId }}" + - "-tests.write-read-series-test.num-series=1000" + - "-tests.write-read-series-test.max-query-age=48h" + - "-server.metrics-port={{ include "mimir.serverHttpListenPort" . }}" + {{- range $key, $value := .Values.smoke_test.extraArgs }} + - "-{{ $key }}={{ $value }}" + {{- end }} + volumeMounts: + {{- if .Values.smoke_test.extraVolumeMounts }} + {{ toYaml .Values.smoke_test.extraVolumeMounts | nindent 12 }} + {{- end }} + env: + {{- with .Values.global.extraEnv }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.smoke_test.env }} + {{ toYaml . 
| nindent 12 }} + {{- end }} + envFrom: + {{- with .Values.global.extraEnvFrom }} + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.smoke_test.extraEnvFrom }} + {{- toYaml . | nindent 12 }} + {{- end }} + restartPolicy: OnFailure + volumes: + {{- if .Values.smoke_test.extraVolumes }} + {{ toYaml .Values.smoke_test.extraVolumes | nindent 8 }} + {{- end }} +{{- end }} \ No newline at end of file diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index a7ac864fc8..3923863486 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -1602,3 +1602,17 @@ gateway: - secretName: gem-gateway-tls hosts: - gateway.gem.example.com + +# Settings for the smoke-test job. +smoke_test: + image: + repository: grafana/mimir-continuous-test + # TODO: replace with weekly tag starting with r191 + tag: helm-smoke-test-d12276bfc + pullPolicy: IfNotPresent + tenantId: '' + extraArgs: {} + env: [] + extraEnvFrom: [] + annotations: {} + initContainers: [] diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml new file mode 100644 index 0000000000..742c6ba1d8 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/smoke-test/smoke-test-job.yaml @@ -0,0 +1,52 @@ +--- +# Source: mimir-distributed/templates/smoke-test/smoke-test-job.yaml +apiVersion: batch/v1 +kind: Job +metadata: + name: test-oss-values-mimir-smoke-test + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: smoke-test + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + "helm.sh/hook": test +spec: + backoffLimit: 5 + completions: 1 + parallelism: 1 + selector: + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: smoke-test + spec: + serviceAccountName: test-oss-values-mimir + securityContext: + null + initContainers: + [] + containers: + - name: smoke-test + image: "grafana/mimir-continuous-test:helm-smoke-test-d12276bfc" + imagePullPolicy: + args: + - "-tests.smoke-test" + - "-tests.write-endpoint=http://test-oss-values-mimir-nginx.citestns.svc:80" + - "-tests.read-endpoint=http://test-oss-values-mimir-nginx.citestns.svc:80/prometheus" + - "-tests.tenant-id=" + - "-tests.write-read-series-test.num-series=1000" + - "-tests.write-read-series-test.max-query-age=48h" + - "-server.metrics-port=8080" + volumeMounts: + env: + envFrom: + - secretRef: + name: mimir-minio-secret + restartPolicy: OnFailure + volumes: diff --git a/pkg/continuoustest/manager.go b/pkg/continuoustest/manager.go index ab0e4a6b41..7e71aed74d 100644 --- a/pkg/continuoustest/manager.go +++ b/pkg/continuoustest/manager.go @@ -7,6 +7,8 @@ import ( "flag" "time" + "github.com/go-kit/log" + "github.com/go-kit/log/level" "golang.org/x/sync/errgroup" ) @@ -33,13 +35,15 @@ func (cfg *ManagerConfig) RegisterFlags(f *flag.FlagSet) { } type Manager struct { - cfg ManagerConfig - tests []Test + cfg ManagerConfig + logger log.Logger + tests []Test } -func NewManager(cfg ManagerConfig) *Manager { +func NewManager(cfg ManagerConfig, logger log.Logger) *Manager { return &Manager{ 
- cfg: cfg, + cfg: cfg, + logger: logger, } } @@ -65,6 +69,11 @@ func (m *Manager) Run(ctx context.Context) error { // Run it immediately, and then every configured period. err := t.Run(ctx, time.Now()) if m.cfg.SmokeTest { + if err != nil { + level.Info(m.logger).Log("msg", "Test failed", "test", t.Name(), "err", err) + } else { + level.Info(m.logger).Log("msg", "Test passed", "test", t.Name()) + } return err } diff --git a/pkg/continuoustest/manager_test.go b/pkg/continuoustest/manager_test.go index 2dd95873fb..cced69240f 100644 --- a/pkg/continuoustest/manager_test.go +++ b/pkg/continuoustest/manager_test.go @@ -8,6 +8,7 @@ import ( "testing" "time" + "github.com/go-kit/log" "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -34,11 +35,12 @@ func (d *dummyTest) Run(ctx context.Context, now time.Time) error { } func TestManager_PeriodicRun(t *testing.T) { + logger := log.NewNopLogger() cfg := ManagerConfig{} cfg.RegisterFlags(flag.NewFlagSet("", flag.ContinueOnError)) cfg.RunInterval = time.Millisecond * 10 - manager := NewManager(cfg) + manager := NewManager(cfg, logger) dummyTest := &dummyTest{} manager.AddTest(dummyTest) @@ -55,12 +57,13 @@ func TestManager_PeriodicRun(t *testing.T) { func TestManager_SmokeTest(t *testing.T) { t.Run("successful smoke test", func(t *testing.T) { + logger := log.NewNopLogger() cfg := ManagerConfig{} cfg.RegisterFlags(flag.NewFlagSet("", flag.ContinueOnError)) cfg.RunInterval = time.Millisecond * 10 cfg.SmokeTest = true - manager := NewManager(cfg) + manager := NewManager(cfg, logger) dummyTest := &dummyTest{} manager.AddTest(dummyTest) @@ -74,12 +77,13 @@ func TestManager_SmokeTest(t *testing.T) { }) t.Run("failed smoke test", func(t *testing.T) { + logger := log.NewNopLogger() cfg := ManagerConfig{} cfg.RegisterFlags(flag.NewFlagSet("", flag.ContinueOnError)) cfg.RunInterval = time.Millisecond * 10 cfg.SmokeTest = true - manager := NewManager(cfg) + manager := NewManager(cfg, logger) dummyTest := &dummyTest{} dummyTest.err = errors.New("test error") diff --git a/pkg/continuoustest/write_read_series.go b/pkg/continuoustest/write_read_series.go index 1da4b043ce..d0ec8e6a45 100644 --- a/pkg/continuoustest/write_read_series.go +++ b/pkg/continuoustest/write_read_series.go @@ -80,7 +80,7 @@ func (t *WriteReadSeriesTest) Init(ctx context.Context, now time.Time) error { from, to := t.findPreviouslyWrittenTimeRange(ctx, now) if from.IsZero() || to.IsZero() { - level.Info(t.logger).Log("msg", "No valid previously written samples time range found") + level.Info(t.logger).Log("msg", "No valid previously written samples time range found, will continue writing from the nearest interval-aligned timestamp") return nil } if to.Before(now.Add(-writeMaxAge)) { From e6b8ef61ecd82e6d90bfba7ebc2977748b92aa4f Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Mon, 20 Jun 2022 14:29:51 +0200 Subject: [PATCH 02/63] Jsonnet: Support deploying isolated query path for rule evaluation. (#2073) Adds experimental support to the jsonnet for deploying an isolated query path which can be used for remote rule evaluation. 
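
For readers skimming this patch series, a minimal sketch of how the new mode might be switched on from a deployment jsonnet, assuming the usual `_config` override pattern of the Mimir jsonnet library. Only the `ruler_remote_evaluation_enabled` option name is confirmed by the CHANGELOG entry added below; the import path and the `namespace` value are illustrative assumptions, and the actual examples live in the added test-ruler-remote-evaluation*.jsonnet files, which are not reproduced here.

    // Illustrative sketch only: the option name comes from the CHANGELOG entry in this
    // patch; the import path and namespace are assumptions, not the repository's test file.
    local mimir = import 'mimir/mimir.libsonnet';

    mimir {
      _config+:: {
        namespace: 'default',
        // Deploy the dedicated ruler query path (ruler-querier, ruler-query-frontend,
        // ruler-query-scheduler) and point the ruler at it for rule evaluation.
        ruler_remote_evaluation_enabled: true,
      },
    }
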
--- CHANGELOG.md | 1 + ...est-ruler-remote-evaluation-generated.yaml | 2068 +++++++++++++++++ ...remote-evaluation-migration-generated.yaml | 2067 ++++++++++++++++ ...-ruler-remote-evaluation-migration.jsonnet | 24 + .../test-ruler-remote-evaluation.jsonnet | 23 + operations/mimir/mimir.libsonnet | 1 + .../mimir/ruler-remote-evaluation.libsonnet | 84 + 7 files changed, 4268 insertions(+) create mode 100644 operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml create mode 100644 operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml create mode 100644 operations/mimir-tests/test-ruler-remote-evaluation-migration.jsonnet create mode 100644 operations/mimir-tests/test-ruler-remote-evaluation.jsonnet create mode 100644 operations/mimir/ruler-remote-evaluation.libsonnet diff --git a/CHANGELOG.md b/CHANGELOG.md index 9539902202..eb0797375e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -73,6 +73,7 @@ * `autoscaling_querier_min_replicas`: minimum number of querier replicas. * `autoscaling_querier_max_replicas`: maximum number of querier replicas. * `autoscaling_prometheus_url`: Prometheus base URL from which to scrape Mimir metrics (e.g. `http://prometheus.default:9090/prometheus`). +* [FEATURE] Jsonnet: Add support for ruler remote evaluation mode (`ruler_remote_evaluation_enabled`), which deploys and uses a dedicated query path for rule evaluation. This enables the benefits of the query-frontend for rule evaluation, such as query sharding. #2073 * [ENHANCEMENT] Added `compactor` service, that can be used to route requests directly to compactor (e.g. admin UI). #2063 * [ENHANCEMENT] Added a `consul_enabled` configuration option that defaults to true (matching previous behavior) to provide the ability to disable consul. 
#2093 diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml new file mode 100644 index 0000000000..7ec949a8eb --- /dev/null +++ b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml @@ -0,0 +1,2068 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: 
consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: 
consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + 
labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-querier + name: ruler-querier + namespace: default +spec: + ports: + - name: ruler-querier-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler-querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-query-frontend + name: ruler-query-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: ruler-query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler-query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-query-scheduler + name: ruler-query-scheduler + namespace: default +spec: + ports: + - name: ruler-query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler-query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-query-scheduler + name: ruler-query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: ruler-query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: 
ruler-query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - 
-distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - 
-store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: 
overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.query-frontend.address=dns:///ruler-query-frontend.default.svc.cluster.local:9095 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.store=consul + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler-querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler-querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler-querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler-querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - 
-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=ruler-query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler-querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler-query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler-query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler-query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler-query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=false + - -query-frontend.max-cache-freshness=10m + - -query-frontend.scheduler-address=ruler-query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler-query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: 
overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler-query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler-query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler-query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler-query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler-query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - 
-compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + 
initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 
9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - 
-blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml new file mode 100644 index 0000000000..f8eabe1838 --- /dev/null +++ b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml @@ -0,0 +1,2067 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + 
- match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: 
consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + 
targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: 
query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-querier + name: ruler-querier + namespace: default +spec: + ports: + - name: ruler-querier-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler-querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-query-frontend + name: ruler-query-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: ruler-query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler-query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-query-scheduler + name: ruler-query-scheduler + namespace: default +spec: + ports: + - name: ruler-query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler-query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler-query-scheduler + name: ruler-query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: ruler-query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: ruler-query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 
+ name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: 
/etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - 
-query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ruler-storage.backend=gcs + - 
-ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.store=consul + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler-querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler-querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler-querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler-querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=ruler-query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: 
ruler-querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler-query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler-query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler-query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler-query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=false + - -query-frontend.max-cache-freshness=10m + - -query-frontend.scheduler-address=ruler-query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler-query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler-query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler-query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler-query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler-query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler-query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: 
alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: 
PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 
+kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + 
- -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: 
+ etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-migration.jsonnet b/operations/mimir-tests/test-ruler-remote-evaluation-migration.jsonnet new file mode 100644 index 0000000000..1cbc228d31 --- /dev/null +++ b/operations/mimir-tests/test-ruler-remote-evaluation-migration.jsonnet @@ -0,0 +1,24 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + ruler_remote_evaluation_enabled: true, + ruler_remote_evaluation_migration_enabled: true, + }, +} diff --git a/operations/mimir-tests/test-ruler-remote-evaluation.jsonnet b/operations/mimir-tests/test-ruler-remote-evaluation.jsonnet new file mode 100644 index 0000000000..3dbb931819 --- /dev/null +++ b/operations/mimir-tests/test-ruler-remote-evaluation.jsonnet @@ -0,0 +1,23 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + ruler_remote_evaluation_enabled: true, + }, +} diff --git a/operations/mimir/mimir.libsonnet b/operations/mimir/mimir.libsonnet index 4840888c96..b362fff9d3 100644 --- a/operations/mimir/mimir.libsonnet +++ b/operations/mimir/mimir.libsonnet @@ -26,6 +26,7 @@ (import 'multi-zone.libsonnet') + (import 'memberlist.libsonnet') + (import 'continuous-test.libsonnet') + +(import 'ruler-remote-evaluation.libsonnet') + // Import autoscaling at the end because it overrides deployments. (import 'autoscaling.libsonnet') diff --git a/operations/mimir/ruler-remote-evaluation.libsonnet b/operations/mimir/ruler-remote-evaluation.libsonnet new file mode 100644 index 0000000000..974f4f7451 --- /dev/null +++ b/operations/mimir/ruler-remote-evaluation.libsonnet @@ -0,0 +1,84 @@ +// Deployment of a dedicated query path for ruler remote evaluation. +{ + _config+:: { + ruler_remote_evaluation_enabled: false, + ruler_remote_evaluation_migration_enabled: false, + + // Note: There is no option to disable ruler-query-scheduler. 
+ }, + + local useRulerQueryFrontend = $._config.ruler_remote_evaluation_enabled && !$._config.ruler_remote_evaluation_migration_enabled, + + ruler_args+:: if !useRulerQueryFrontend then {} else { + 'ruler.query-frontend.address': 'dns:///ruler-query-frontend.%(namespace)s.svc.cluster.local:9095' % $._config, + }, + + local container = $.core.v1.container, + local deployment = $.apps.v1.deployment, + local service = $.core.v1.service, + + local queryFrontendDisableCacheArgs = + { + // Query cache is of no benefit to rule evaluation. + 'query-frontend.cache-results': false, + 'query-frontend.results-cache.backend': null, + 'query-frontend.results-cache.memcached.addresses': null, + 'query-frontend.results-cache.memcached.timeout': null, + }, + + // + // Querier + // + + ruler_querier_args+:: + $.querier_args + + $.querierUseQuerySchedulerArgs('ruler-query-scheduler'), + + ruler_querier_container:: + $.newQuerierContainer('ruler-querier', $.ruler_querier_args), + + ruler_querier_deployment: if !$._config.ruler_remote_evaluation_enabled then {} else + $.newQuerierDeployment('ruler-querier', $.ruler_querier_container), + + ruler_querier_service: if !$._config.ruler_remote_evaluation_enabled then {} else + $.util.serviceFor($.ruler_querier_deployment, $._config.service_ignored_labels), + + // + // Query Frontend + // + + ruler_query_frontend_args+:: + $.query_frontend_args + + $.queryFrontendUseQuerySchedulerArgs('ruler-query-scheduler') + + queryFrontendDisableCacheArgs, + + ruler_query_frontend_container:: + $.newQueryFrontendContainer('ruler-query-frontend', $.ruler_query_frontend_args), + + ruler_query_frontend_deployment: if !$._config.ruler_remote_evaluation_enabled then {} else + $.newQueryFrontendDeployment('ruler-query-frontend', $.ruler_query_frontend_container), + + ruler_query_frontend_service: if !$._config.ruler_remote_evaluation_enabled then {} else + $.util.serviceFor($.ruler_query_frontend_deployment, $._config.service_ignored_labels) + + // Note: We use a headless service because the ruler uses gRPC load balancing. + service.mixin.spec.withClusterIp('None'), + + // + // Query Scheduler + // + + ruler_query_scheduler_args+:: + $.query_scheduler_args, + + ruler_query_scheduler_container:: + $.newQuerySchedulerContainer('ruler-query-scheduler', $.ruler_query_scheduler_args), + + ruler_query_scheduler_deployment: if !$._config.ruler_remote_evaluation_enabled then {} else + $.newQuerySchedulerDeployment('ruler-query-scheduler', $.ruler_query_scheduler_container), + + ruler_query_scheduler_service: if !$._config.ruler_remote_evaluation_enabled then {} else + $.util.serviceFor($.ruler_query_scheduler_deployment, $._config.service_ignored_labels), + + ruler_query_scheduler_discovery_service: if !$._config.ruler_remote_evaluation_enabled then {} else + $.newQuerySchedulerDiscoveryService('ruler-query-scheduler', $.ruler_query_scheduler_deployment), +} From b13d2dfc827d023d33910441ffa537af6bee5e02 Mon Sep 17 00:00:00 2001 From: Nick Pillitteri <56quarters@users.noreply.github.com> Date: Mon, 20 Jun 2022 09:04:03 -0400 Subject: [PATCH 03/63] Set shuffle sharding ingester lookback automatically (#2110) This change deprecates the `querier.shuffle-sharding-ingesters-lookback-period` option, instead setting the lookback period from the value of the `querier.query-ingesters-within` option. Outside of our own integration tests, there's no situation where it's useful to set the lookback period to a different value than the `query-ingesters-within` option. 
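For illustration only, a minimal sketch of how a querier configuration is simplified by this change, assuming the YAML keys mirror the CLI flag names (values are examples, not recommendations):

    # Before: the lookback period had to be kept >= -querier.query-ingesters-within
    # and -querier.query-store-after by hand.
    querier:
      query_ingesters_within: 13h
      shuffle_sharding_ingesters_lookback_period: 13h

    # After: the lookback is derived from query_ingesters_within, so the
    # deprecated setting can simply be dropped.
    querier:
      query_ingesters_within: 13h

Deriving the lookback from `-querier.query-ingesters-within` also removes a misconfiguration risk: if the lookback was set lower than `-querier.query-ingesters-within` or `-querier.query-store-after`, queriers could select too few ingesters and miss recently received series.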
This also adds a new option `querier.shuffle-sharding-ingesters-enabled` that can be used to enable or disable shuffle sharding on ingesters on the read path instead of using the lookback period for this. Fixes #1810 Signed-off-by: Nick Pillitteri --- CHANGELOG.md | 3 ++- cmd/mimir/config-descriptor.json | 10 +++---- cmd/mimir/help-all.txt.tmpl | 4 +-- .../configuring-shuffle-sharding/index.md | 26 ++++++++++++------- .../index.md | 15 +++++------ .../scaling-out.md | 6 ++--- integration/ingester_sharding_test.go | 17 +++++++++--- operations/mimir/shuffle-sharding.libsonnet | 6 ++--- pkg/distributor/distributor.go | 2 +- pkg/distributor/distributor_test.go | 13 +++++----- pkg/mimir/mimir.go | 2 +- pkg/mimir/modules.go | 9 ++++++- pkg/querier/querier.go | 20 ++++++-------- pkg/querier/querier_test.go | 13 ---------- 14 files changed, 74 insertions(+), 72 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eb0797375e..a9c8fa220f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,13 +8,14 @@ * [CHANGE] Default values have changed for the following settings. This improves query performance for recent data (within 12h) by only reading from ingesters: #1909 #1921 - `-blocks-storage.bucket-store.ignore-blocks-within` now defaults to `10h` (previously `0`) - `-querier.query-store-after` now defaults to `12h` (previously `0`) - - `-querier.shuffle-sharding-ingesters-lookback-period` now defaults to `13h` (previously `0`) + * [CHANGE] The following settings are now classified as advanced because the defaults should work for most users and tuning them requires in-depth knowledge of how the read path works: #1929 - `-querier.query-ingesters-within` - `-querier.query-store-after` * [CHANGE] Config flag category overrides can be set dynamically at runtime. #1934 * [CHANGE] Ingester: deprecated `-ingester.ring.join-after`. Mimir now behaves as this setting is always set to 0s. This configuration option will be removed in Mimir 2.4.0. #1965 * [CHANGE] Blocks uploaded by ingester no longer contain `__org_id__` label. Compactor now ignores this label and will compact blocks with and without this label together. `mimirconvert` tool will remove the label from blocks as "unknown" label. #1972 +* [CHANGE] Querier: deprecated `-querier.shuffle-sharding-ingesters-lookback-period`, instead adding `-querier.shuffle-sharding-ingesters-enabled` to enable or disable shuffle sharding on the read path. The value of `-querier.query-ingesters-within` is now used internally for shuffle sharding lookback. #2110 * [ENHANCEMENT] Distributor: Added limit to prevent tenants from sending excessive number of requests: #1843 * The following CLI flags (and their respective YAML config options) have been added: * `-distributor.request-rate-limit` diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 93b450125f..e3da1f23c7 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1491,13 +1491,13 @@ }, { "kind": "field", - "name": "shuffle_sharding_ingesters_lookback_period", + "name": "shuffle_sharding_ingesters_enabled", "required": false, - "desc": "When this setting is \u003e 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured -querier.query-store-after and -querier.query-ingesters-within. 
If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", + "desc": "Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -querier.query-ingesters-within. If this setting is false or -querier.query-ingesters-within is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", "fieldValue": null, - "fieldDefaultValue": 46800000000000, - "fieldFlag": "querier.shuffle-sharding-ingesters-lookback-period", - "fieldType": "duration", + "fieldDefaultValue": true, + "fieldFlag": "querier.shuffle-sharding-ingesters-enabled", + "fieldType": "boolean", "fieldCategory": "advanced" }, { diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 363edb9380..ead4580500 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1095,8 +1095,8 @@ Usage of ./cmd/mimir/mimir: The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'. (default 12h0m0s) -querier.scheduler-address string Address of the query-scheduler component, in host:port format. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint. - -querier.shuffle-sharding-ingesters-lookback-period duration - When this setting is > 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured -querier.query-store-after and -querier.query-ingesters-within. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled). (default 13h0m0s) + -querier.shuffle-sharding-ingesters-enabled + Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -querier.query-ingesters-within. If this setting is false or -querier.query-ingesters-within is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled). (default true) -querier.store-gateway-client.tls-ca-path string Path to the CA certificates file to validate server certificate against. If not set, the host's root CA certificates are used. -querier.store-gateway-client.tls-cert-path string diff --git a/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md b/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md index d9cbdb15fb..d980118d9c 100644 --- a/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md +++ b/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md @@ -101,10 +101,15 @@ To enable shuffle sharding for ingesters on the write path, configure the follow Assuming that you have enabled shuffle sharding for the write path, to enable shuffle sharding for ingesters on the read path, configure the following flags (or their respective YAML configuration options) on the querier and ruler: - `-distributor.ingestion-tenant-shard-size=` -- `-querier.shuffle-sharding-ingesters-lookback-period=`
- Queriers and rulers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which might have received series since 'now - lookback period'. - The configured lookback `` should be: - - greater than or equal to `-querier.query-store-after` and `-querier.query-ingesters-within` and, + +The following flags are set appropriately by default to enable shuffle sharding for ingesters on the read path. If you need to modify their defaults: + +- `-querier.shuffle-sharding-ingesters-enabled=true`
+ Shuffle sharding for ingesters on the read path can be explicitly enabled or disabled. +- `-querier.query-ingesters-within=`
+ Queriers and rulers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which might have received series since 'now - query ingesters within'. If this period is `0`, shuffle sharding for ingesters on the read path is disabled, which means all ingesters in the Mimir cluster are queried for any tenant. + The configured `` should be: + - greater than `-querier.query-store-after` and, - greater than the estimated minimum amount of time for the oldest samples stored in a block uploaded by ingester to be discovered and available for querying. When running Grafana Mimir with the default configuration, the estimated minimum amount of time for the oldest sample in a uploaded block to be available for querying is `3h`. @@ -115,23 +120,24 @@ Keeping ingesters shuffle sharding enabled only on the write path does not lead If you’re running a Grafana Mimir cluster with shuffle sharding disabled, and you want to enable it for the ingesters, use the following rollout strategy to avoid missing querying for any series currently in the ingesters: +1. Explicitly disable ingesters shuffle-sharding on the read path via `-querier.shuffle-sharding-ingesters-enabled=false` since this is enabled by default. 1. Enable ingesters shuffle sharding on the write path. -1. Wait for at least the amount of time specified via `-querier.shuffle-sharding-ingesters-lookback-period`. -1. Enable ingesters shuffle-sharding on the read path. +1. Wait for at least the amount of time specified via `-querier.query-ingesters-within`. +1. Enable ingesters shuffle-sharding on the read path via `-querier.shuffle-sharding-ingesters-enabled=true`. #### Limitation: Decreasing the tenant shard size The current shuffle sharding implementation in Grafana Mimir has a limitation that prevents you from safely decreasing the tenant shard size when you enable ingesters’ shuffle sharding on the read path. If a tenant’s shard decreases in size, there is currently no way for the queriers and rulers to know how large the tenant shard was previously, and as a result, they potentially miss an ingester with data for that tenant. -The lookback mechanism, which is used to select the ingesters that might have received series since 'now - lookback period', doesn't work correctly if the tenant shard size is decreased. +The query-ingesters-within period, which is used to select the ingesters that might have received series since 'now - query ingesters within', doesn't work correctly for finding tenant shards if the tenant shard size is decreased. Although decreasing the tenant shard size is not supported, consider the following workaround: -1. Disable shuffle sharding on the read path. +1. Disable shuffle sharding on the read path via `-querier.shuffle-sharding-ingesters-enabled=false`. 1. Decrease the configured tenant shard size. -1. Wait for at least the amount of time specified via `-querier.shuffle-sharding-ingesters-lookback-period`. -1. Re-enable shuffle sharding on the read path. +1. Wait for at least the amount of time specified via `-querier.query-ingesters-within`. +1. Re-enable shuffle sharding on the read path via `-querier.shuffle-sharding-ingesters-enabled=true`. 
### Query-frontend and query-scheduler shuffle sharding diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index 5ed653b5f2..cd6d753ac3 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -836,14 +836,13 @@ store_gateway_client: # CLI flag: -querier.store-gateway-client.tls-insecure-skip-verify [tls_insecure_skip_verify: | default = false] -# (advanced) When this setting is > 0, queriers fetch in-memory series from the -# minimum set of required ingesters, selecting only ingesters which may have -# received series since 'now - lookback period'. The lookback period should be -# greater or equal than the configured -querier.query-store-after and -# -querier.query-ingesters-within. If this setting is 0, queriers always query -# all ingesters (ingesters shuffle sharding on read path is disabled). -# CLI flag: -querier.shuffle-sharding-ingesters-lookback-period -[shuffle_sharding_ingesters_lookback_period: | default = 13h] +# (advanced) Fetch in-memory series from the minimum set of required ingesters, +# selecting only ingesters which may have received series since +# -querier.query-ingesters-within. If this setting is false or +# -querier.query-ingesters-within is '0', queriers always query all ingesters +# (ingesters shuffle sharding on read path is disabled). +# CLI flag: -querier.shuffle-sharding-ingesters-enabled +[shuffle_sharding_ingesters_enabled: | default = true] # The maximum number of concurrent queries. This config option should be set on # query-frontend too when query sharding is enabled. diff --git a/docs/sources/operators-guide/running-production-environment/scaling-out.md b/docs/sources/operators-guide/running-production-environment/scaling-out.md index 818e6a6b9f..e7951121cd 100644 --- a/docs/sources/operators-guide/running-production-environment/scaling-out.md +++ b/docs/sources/operators-guide/running-production-environment/scaling-out.md @@ -73,7 +73,7 @@ Complete the following steps to scale down ingesters deployed in a single zone. ``` -querier.query-store-after=0s - -querier.shuffle-sharding-ingesters-lookback-period=87600h + -querier.shuffle-sharding-ingesters-enabled=false ``` b. Configure the compactors to frequently update the bucket index: @@ -106,10 +106,10 @@ Complete the following steps to scale down ingesters deployed in a single zone. c. Send a `SIGINT` or `SIGTERM` signal to the process of the ingester to terminate. - d. Wait 10 minutes before proceeding with the next ingester. The temporarily configuration applied guarantees newly uploaded blocks are available for querying within 10 minutes. + d. Wait 10 minutes before proceeding with the next ingester. The temporarily applied configuration guarantees newly uploaded blocks are available for querying within 10 minutes. 1. Wait until the originally configured `-querier.query-store-after` period of time has elapsed since when all ingesters have been shutdown. -1. Revert the temporarily configuration changes done at the beginning of the scale down procedure. +1. Revert the temporary configuration changes done at the beginning of the scale down procedure. 
#### Scaling down ingesters deployed in multiple zones diff --git a/integration/ingester_sharding_test.go b/integration/ingester_sharding_test.go index a6be8ba4c6..f778022f98 100644 --- a/integration/ingester_sharding_test.go +++ b/integration/ingester_sharding_test.go @@ -25,6 +25,7 @@ import ( func TestIngesterSharding(t *testing.T) { const numSeriesToPush = 1000 + const queryIngestersWithinSecs = 5 tests := map[string]struct { tenantShardSize int @@ -48,11 +49,13 @@ func TestIngesterSharding(t *testing.T) { flags := BlocksStorageFlags() flags["-distributor.ingestion-tenant-shard-size"] = strconv.Itoa(testData.tenantShardSize) - - // Enable shuffle sharding on read path but not lookback, otherwise all ingesters would be - // queried being just registered. + // We're verifying that shuffle sharding on the read path works so we need to set `query-ingesters-within` + // to a small enough value that they'll have been part of the ring for long enough by the time we attempt + // to query back the values we wrote to them. If they _haven't_ been part of the ring for long enough, the + // query would be sent to all ingesters and our test wouldn't really be testing anything. flags["-querier.query-store-after"] = "0" - flags["-querier.shuffle-sharding-ingesters-lookback-period"] = "1ns" + flags["-querier.query-ingesters-within"] = fmt.Sprintf("%ds", queryIngestersWithinSecs) + flags["-ingester.ring.heartbeat-period"] = "1s" // Start dependencies. consul := e2edb.NewConsul() @@ -77,6 +80,11 @@ func TestIngesterSharding(t *testing.T) { labels.MustNewMatcher(labels.MatchEqual, "name", "ingester"), labels.MustNewMatcher(labels.MatchEqual, "state", "ACTIVE")))) + // Yes, we're sleeping in this test. We need to make sure that the ingesters have been part + // of the ring long enough before writing metrics to them to ensure that only the shuffle + // sharded ingesters will be queried for them when we go to verify the series written. + time.Sleep((queryIngestersWithinSecs + 1) * time.Second) + // Push series. now := time.Now() expectedVectors := map[string]model.Vector{} @@ -109,6 +117,7 @@ func TestIngesterSharding(t *testing.T) { } } + // Verify that the expected number of ingesters had series (write path). require.Equal(t, testData.expectedIngestersWithSeries, numIngestersWithSeries) require.Equal(t, numSeriesToPush, totalIngestedSeries) diff --git a/operations/mimir/shuffle-sharding.libsonnet b/operations/mimir/shuffle-sharding.libsonnet index 3fa96feb30..9c863e117a 100644 --- a/operations/mimir/shuffle-sharding.libsonnet +++ b/operations/mimir/shuffle-sharding.libsonnet @@ -125,10 +125,8 @@ } ) + ( if !($._config.shuffle_sharding.ingester_write_path_enabled && !$._config.shuffle_sharding.ingester_read_path_enabled) then {} else { - // The shuffle-sharding flags in the ruler applies both to read and write path, so we don’t have a way - // to keep it enabled on the write path and disable it only on the read path. However, we can obtain the - // same effect setting the lookback period to a very high value. - 'querier.shuffle-sharding-ingesters-lookback-period': '87600h', // 3650 days. 
+ // If shuffle sharding is enabled for the write path but isn't enabled for the read path, Mimir will query all ingesters + 'querier.shuffle-sharding-ingesters-enabled': 'false', } ) + ( if !$._config.shuffle_sharding.store_gateway_enabled then {} else { diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 7d81f9714b..5e60645c17 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -144,7 +144,7 @@ type Config struct { // this (and should never use it) but this feature is used by other projects built on top of it SkipLabelNameValidation bool `yaml:"-"` - // This config is dynamically injected because defined in the querier config. + // This config is dynamically injected because it is defined in the querier config. ShuffleShardingLookbackPeriod time.Duration `yaml:"-"` // Limits for distributor diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index d705b066a6..006c6d1791 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -2007,7 +2007,7 @@ func TestDistributor_MetricsMetadata(t *testing.T) { for testName, testData := range tests { t.Run(testName, func(t *testing.T) { // Create distributor - ds, ingesters, _ := prepare(t, prepConfig{ + ds, _, _ := prepare(t, prepConfig{ numIngesters: numIngesters, happyIngesters: numIngesters, numDistributors: 1, @@ -2022,16 +2022,15 @@ func TestDistributor_MetricsMetadata(t *testing.T) { _, err := ds[0].Push(ctx, req) require.NoError(t, err) + // Check how many ingesters are queried as part of the shuffle sharding subring. + replicationSet, err := ds[0].GetIngestersForMetadata(ctx) + require.NoError(t, err) + assert.Equal(t, testData.expectedIngesters, len(replicationSet.Instances)) + // Assert on metric metadata metadata, err := ds[0].MetricsMetadata(ctx) require.NoError(t, err) assert.Equal(t, 10, len(metadata)) - - // Check how many ingesters have been queried. - // Due to the quorum the distributor could cancel the last request towards ingesters - // if all other ones are successful, so we're good either has been queried X or X-1 - // ingesters. 
- assert.Contains(t, []int{testData.expectedIngesters, testData.expectedIngesters - 1}, countMockIngestersCalls(ingesters, "MetricsMetadata")) }) } } diff --git a/pkg/mimir/mimir.go b/pkg/mimir/mimir.go index d04df3cc52..7c1e2c6308 100644 --- a/pkg/mimir/mimir.go +++ b/pkg/mimir/mimir.go @@ -138,7 +138,7 @@ func (c *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { c.API.RegisterFlags(f) c.registerServerFlagsWithChangedDefaultValues(f) c.Distributor.RegisterFlags(f, logger) - c.Querier.RegisterFlags(f) + c.Querier.RegisterFlags(f, logger) c.IngesterClient.RegisterFlags(f) c.Ingester.RegisterFlags(f, logger) c.Flusher.RegisterFlags(f) diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index 2cca624d7e..a63adb66bf 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -278,7 +278,14 @@ func (t *Mimir) initOverridesExporter() (services.Service, error) { func (t *Mimir) initDistributorService() (serv services.Service, err error) { t.Cfg.Distributor.DistributorRing.ListenPort = t.Cfg.Server.GRPCListenPort - t.Cfg.Distributor.ShuffleShardingLookbackPeriod = t.Cfg.Querier.ShuffleShardingIngestersLookbackPeriod + + // Only enable shuffle sharding on the read path when `query-ingesters-within` + // is non-zero since otherwise we can't determine if an ingester should be part + // of a tenant's shuffle sharding subring (we compare its registration time with + // the lookback period). + if t.Cfg.Querier.ShuffleShardingIngestersEnabled && t.Cfg.Querier.QueryIngestersWithin > 0 { + t.Cfg.Distributor.ShuffleShardingLookbackPeriod = t.Cfg.Querier.QueryIngestersWithin + } // Check whether the distributor can join the distributors ring, which is // whenever it's not running as an internal dependency (ie. querier or diff --git a/pkg/querier/querier.go b/pkg/querier/querier.go index 018f2f0f8a..637b481660 100644 --- a/pkg/querier/querier.go +++ b/pkg/querier/querier.go @@ -15,6 +15,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" + "github.com/grafana/dskit/flagext" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/common/model" "github.com/prometheus/prometheus/model/labels" @@ -49,7 +50,7 @@ type Config struct { StoreGatewayClient ClientConfig `yaml:"store_gateway_client"` - ShuffleShardingIngestersLookbackPeriod time.Duration `yaml:"shuffle_sharding_ingesters_lookback_period" category:"advanced"` + ShuffleShardingIngestersEnabled bool `yaml:"shuffle_sharding_ingesters_enabled" category:"advanced"` // PromQL engine config. EngineConfig engine.Config `yaml:",inline"` @@ -62,20 +63,21 @@ const ( ) var ( - errBadLookbackConfigs = fmt.Errorf("the -%s setting must be greater than -%s otherwise queries might return partial results", queryIngestersWithinFlag, queryStoreAfterFlag) - errShuffleShardingLookbackLessThanQueryStoreAfter = fmt.Errorf("the -%s setting must be greater or equal to -%s", shuffleShardingIngestersLookbackPeriodFlag, queryStoreAfterFlag) - errEmptyTimeRange = errors.New("empty time range") + errBadLookbackConfigs = fmt.Errorf("the -%s setting must be greater than -%s otherwise queries might return partial results", queryIngestersWithinFlag, queryStoreAfterFlag) + errEmptyTimeRange = errors.New("empty time range") ) // RegisterFlags adds the flags required to config this to the given FlagSet. 
-func (cfg *Config) RegisterFlags(f *flag.FlagSet) { +func (cfg *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { cfg.StoreGatewayClient.RegisterFlagsWithPrefix("querier.store-gateway-client", f) f.BoolVar(&cfg.Iterators, "querier.iterators", false, "Use iterators to execute query, as opposed to fully materialising the series in memory.") f.BoolVar(&cfg.BatchIterators, "querier.batch-iterators", true, "Use batch iterators to execute query, as opposed to fully materialising the series in memory. Takes precedent over the -querier.iterators flag.") f.DurationVar(&cfg.QueryIngestersWithin, queryIngestersWithinFlag, 13*time.Hour, "Maximum lookback beyond which queries are not sent to ingester. 0 means all queries are sent to ingester.") f.DurationVar(&cfg.MaxQueryIntoFuture, "querier.max-query-into-future", 10*time.Minute, "Maximum duration into the future you can query. 0 to disable.") f.DurationVar(&cfg.QueryStoreAfter, queryStoreAfterFlag, 12*time.Hour, "The time after which a metric should be queried from storage and not just ingesters. 0 means all queries are sent to store. If this option is enabled, the time range of the query sent to the store-gateway will be manipulated to ensure the query end is not more recent than 'now - query-store-after'.") - f.DurationVar(&cfg.ShuffleShardingIngestersLookbackPeriod, shuffleShardingIngestersLookbackPeriodFlag, 13*time.Hour, "When this setting is > 0, queriers fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since 'now - lookback period'. The lookback period should be greater or equal than the configured -querier.query-store-after and -querier.query-ingesters-within. If this setting is 0, queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).") + // TODO(56quarters): Deprecated in Mimir 2.2, remove in Mimir 2.4 + flagext.DeprecatedFlag(f, shuffleShardingIngestersLookbackPeriodFlag, fmt.Sprintf("Deprecated: this setting should always be the same as -%s and will now behave as if it is", queryIngestersWithinFlag), logger) + f.BoolVar(&cfg.ShuffleShardingIngestersEnabled, "querier.shuffle-sharding-ingesters-enabled", true, fmt.Sprintf("Fetch in-memory series from the minimum set of required ingesters, selecting only ingesters which may have received series since -%s. 
If this setting is false or -%s is '0', queriers always query all ingesters (ingesters shuffle sharding on read path is disabled).", queryIngestersWithinFlag, queryIngestersWithinFlag)) cfg.EngineConfig.RegisterFlags(f) } @@ -89,12 +91,6 @@ func (cfg *Config) Validate() error { } } - if cfg.ShuffleShardingIngestersLookbackPeriod > 0 { - if cfg.ShuffleShardingIngestersLookbackPeriod < cfg.QueryStoreAfter { - return errShuffleShardingLookbackLessThanQueryStoreAfter - } - } - return nil } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 85673a4839..7cf19b0241 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -1053,19 +1053,6 @@ func TestConfig_Validate(t *testing.T) { cfg.QueryStoreAfter = time.Hour }, }, - "should pass if 'query store after' is enabled and shuffle-sharding is enabled with greater value": { - setup: func(cfg *Config) { - cfg.QueryStoreAfter = time.Hour - cfg.ShuffleShardingIngestersLookbackPeriod = 2 * time.Hour - }, - }, - "should fail if 'query store after' is enabled and shuffle-sharding is enabled with lesser value": { - setup: func(cfg *Config) { - cfg.QueryStoreAfter = time.Hour - cfg.ShuffleShardingIngestersLookbackPeriod = time.Minute - }, - expected: errShuffleShardingLookbackLessThanQueryStoreAfter, - }, "should pass if both 'query store after' and 'query ingesters within' are set and 'query store after' < 'query ingesters within'": { setup: func(cfg *Config) { cfg.QueryStoreAfter = time.Hour From c3fc3c051aea562e8b8860a52eca71ccef2b1363 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Mon, 20 Jun 2022 15:55:27 +0200 Subject: [PATCH 04/63] K6 script: default HA clusters to 1 (#2142) Signed-off-by: Marco Pracucci --- operations/k6/README.md | 2 +- operations/k6/load-testing-with-k6.js | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/operations/k6/README.md b/operations/k6/README.md index 41e7eb0013..dc176bfb7f 100644 --- a/operations/k6/README.md +++ b/operations/k6/README.md @@ -36,7 +36,7 @@ The [load-testing-with-k6.js] script can be configured using the following envir | `K6_RAMP_DOWN_MIN` | | 0 | Duration of the ramp down period in minutes. | | `K6_SCRAPE_INTERVAL_SECONDS` | | 20 | Simulated Prometheus scrape interval in seconds. | | `K6_HA_REPLICAS` | | 1 | Number of HA replicas to simulate (use 1 for no HA). | -| `K6_HA_CLUSTERS` | | 100 | Number of HA clusters to simulate. | +| `K6_HA_CLUSTERS` | | 1 | Number of HA clusters to simulate. | For example, if Mimir is running on `localhost:80` you can run a small scale test with this command: diff --git a/operations/k6/load-testing-with-k6.js b/operations/k6/load-testing-with-k6.js index 33ad99694c..374e73ffd4 100644 --- a/operations/k6/load-testing-with-k6.js +++ b/operations/k6/load-testing-with-k6.js @@ -88,7 +88,7 @@ const HA_REPLICAS = parseInt(__ENV.K6_HA_REPLICAS || 1); * Number of HA clusters to simulate. 
* @constant {number} */ -const HA_CLUSTERS = parseInt(__ENV.K6_HA_CLUSTERS || 100); +const HA_CLUSTERS = parseInt(__ENV.K6_HA_CLUSTERS || 1); const remote_write_url = get_remote_write_url(); console.debug("Remote write URL:", remote_write_url) From 5d136e182f8cb82d78adfd8f82c4a30b97eb53a7 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Mon, 20 Jun 2022 16:08:27 +0200 Subject: [PATCH 05/63] Compactor: Enable TSDB block upload on per-tenant basis (#2126) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Compactor: Enable block upload on per-tenant basis * Update changelog Signed-off-by: Arve Knudsen Co-authored-by: Peter Štibraný --- CHANGELOG.md | 2 +- cmd/mimir/config-descriptor.json | 10 +++++ cmd/mimir/help-all.txt.tmpl | 2 + cmd/mimir/help.txt.tmpl | 2 + .../index.md | 4 ++ pkg/compactor/block_upload.go | 8 ++++ pkg/compactor/block_upload_test.go | 39 ++++++++++++++++++- pkg/compactor/blocks_cleaner_test.go | 6 +++ pkg/compactor/compactor.go | 3 ++ pkg/util/validation/limits.go | 7 ++++ 10 files changed, 81 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index a9c8fa220f..4adb2ac99f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,7 +31,7 @@ * [ENHANCEMENT] Chunk Mapper: reduce memory usage of async chunk mapper. #2043 * [ENHANCEMENT] Ingesters: Added new configuration option that makes it possible for mimir ingesters to perform queries on overlapping blocks in the filesystem. Enabled with `-blocks-storage.tsdb.allow-overlapping-queries`. #2091 * [ENHANCEMENT] Ingester: reduce sleep time when reading WAL. #2098 -* [ENHANCEMENT] Compactor: Add HTTP API for uploading TSDB blocks. #1694 +* [ENHANCEMENT] Compactor: Add HTTP API for uploading TSDB blocks. Enabled with `-compactor.block-upload-enabled`. #1694 #2126 * [BUGFIX] Fix regexp parsing panic for regexp label matchers with start/end quantifiers. #1883 * [BUGFIX] Ingester: fixed deceiving error log "failed to update cached shipped blocks after shipper initialisation", occurring for each new tenant in the ingester. #1893 * [BUGFIX] Ring: fix bug where instances may appear unhealthy in the hash ring web UI even though they are not. #1933 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index e3da1f23c7..564721a0fb 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -2879,6 +2879,16 @@ "fieldFlag": "compactor.compactor-tenant-shard-size", "fieldType": "int" }, + { + "kind": "field", + "name": "compactor_block_upload_enabled", + "required": false, + "desc": "Enable block upload API for the tenant.", + "fieldValue": null, + "fieldDefaultValue": false, + "fieldFlag": "compactor.block-upload-enabled", + "fieldType": "boolean" + }, { "kind": "field", "name": "s3_sse_type", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index ead4580500..5e98b2b216 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -525,6 +525,8 @@ Usage of ./cmd/mimir/mimir: List of compaction time ranges. (default 2h0m0s,12h0m0s,24h0m0s) -compactor.block-sync-concurrency int Number of Go routines to use when downloading blocks for compaction and uploading resulting blocks. (default 8) + -compactor.block-upload-enabled + Enable block upload API for the tenant. -compactor.blocks-retention-period value Delete blocks containing samples older than the specified retention period. 0 to disable. 
-compactor.cleanup-concurrency int diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl index 3669e3e28d..75e4b5e6fe 100644 --- a/cmd/mimir/help.txt.tmpl +++ b/cmd/mimir/help.txt.tmpl @@ -199,6 +199,8 @@ Usage of ./cmd/mimir/mimir: Directory to store TSDBs (including WAL) in the ingesters. This directory is required to be persisted between restarts. (default "./tsdb/") -blocks-storage.tsdb.retention-period duration TSDB blocks retention in the ingester before a block is removed, relative to the newest block written for the tenant. This should be larger than the -blocks-storage.tsdb.block-ranges-period, -querier.query-store-after and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks. (default 24h0m0s) + -compactor.block-upload-enabled + Enable block upload API for the tenant. -compactor.blocks-retention-period value Delete blocks containing samples older than the specified retention period. 0 to disable. -compactor.compactor-tenant-shard-size int diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index cd6d753ac3..f10b58f845 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -2855,6 +2855,10 @@ The `limits` block configures default and per-tenant limits imposed by component # CLI flag: -compactor.compactor-tenant-shard-size [compactor_tenant_shard_size: | default = 0] +# Enable block upload API for the tenant. +# CLI flag: -compactor.block-upload-enabled +[compactor_block_upload_enabled: | default = false] + # S3 server-side encryption type. Required to enable server-side encryption # overrides for a specific tenant. If not set, the default S3 client settings # are used. diff --git a/pkg/compactor/block_upload.go b/pkg/compactor/block_upload.go index b01bafcbd2..a575188c8e 100644 --- a/pkg/compactor/block_upload.go +++ b/pkg/compactor/block_upload.go @@ -58,6 +58,10 @@ func (c *MultitenantCompactor) HandleBlockUpload(w http.ResponseWriter, r *http. 
http.Error(w, "invalid tenant ID", http.StatusBadRequest) return } + if !c.cfgProvider.CompactorBlockUploadEnabled(tenantID) { + http.Error(w, "block upload is disabled", http.StatusBadRequest) + return + } logger := log.With(util_log.WithContext(ctx, c.logger), "block", blockID) @@ -180,6 +184,10 @@ func (c *MultitenantCompactor) UploadBlockFile(w http.ResponseWriter, r *http.Re http.Error(w, "invalid tenant ID", http.StatusBadRequest) return } + if !c.cfgProvider.CompactorBlockUploadEnabled(tenantID) { + http.Error(w, "block upload is disabled", http.StatusBadRequest) + return + } logger := util_log.WithContext(ctx, c.logger) logger = log.With(logger, "block", blockID) diff --git a/pkg/compactor/block_upload_test.go b/pkg/compactor/block_upload_test.go index 87a24f1f7c..ef94b259a9 100644 --- a/pkg/compactor/block_upload_test.go +++ b/pkg/compactor/block_upload_test.go @@ -109,6 +109,7 @@ func TestMultitenantCompactor_HandleBlockUpload_Create(t *testing.T) { body string meta *metadata.Meta retention time.Duration + disableBlockUpload bool expBadRequest string expConflict string expUnprocessableEntity string @@ -376,6 +377,13 @@ func TestMultitenantCompactor_HandleBlockUpload_Create(t *testing.T) { meta: &validMeta, expInternalServerError: true, }, + { + name: "block upload disabled", + tenantID: tenantID, + blockID: blockID, + disableBlockUpload: true, + expBadRequest: "block upload is disabled", + }, { name: "valid request", tenantID: tenantID, @@ -497,6 +505,7 @@ func TestMultitenantCompactor_HandleBlockUpload_Create(t *testing.T) { cfgProvider := newMockConfigProvider() cfgProvider.userRetentionPeriods[tenantID] = tc.retention + cfgProvider.blockUploadEnabled[tenantID] = !tc.disableBlockUpload c := &MultitenantCompactor{ logger: log.NewNopLogger(), bucketClient: &bkt, @@ -639,10 +648,12 @@ func TestMultitenantCompactor_HandleBlockUpload_Create(t *testing.T) { metaJSON, err := json.Marshal(meta) require.NoError(t, err) + cfgProvider := newMockConfigProvider() + cfgProvider.blockUploadEnabled[tenantID] = true c := &MultitenantCompactor{ logger: log.NewNopLogger(), bucketClient: bkt, - cfgProvider: newMockConfigProvider(), + cfgProvider: cfgProvider, } r := httptest.NewRequest(http.MethodPost, fmt.Sprintf("/api/v1/upload/block/%s", blockID), bytes.NewReader(metaJSON)) r = r.WithContext(user.InjectOrgID(r.Context(), tenantID)) @@ -702,6 +713,7 @@ func TestMultitenantCompactor_UploadBlockFile(t *testing.T) { blockID string path string body string + disableBlockUpload bool expBadRequest string expConflict string expNotFound string @@ -764,6 +776,14 @@ func TestMultitenantCompactor_UploadBlockFile(t *testing.T) { body: "content", expBadRequest: fmt.Sprintf("invalid path: %q", uploadingMetaFilename), }, + { + name: "block upload disabled", + tenantID: tenantID, + blockID: blockID, + disableBlockUpload: true, + path: "chunks/000001", + expBadRequest: "block upload is disabled", + }, { name: "complete block already exists", tenantID: tenantID, @@ -857,9 +877,12 @@ func TestMultitenantCompactor_UploadBlockFile(t *testing.T) { tc.setUpBucketMock(&bkt) } + cfgProvider := newMockConfigProvider() + cfgProvider.blockUploadEnabled[tc.tenantID] = !tc.disableBlockUpload c := &MultitenantCompactor{ logger: log.NewNopLogger(), bucketClient: &bkt, + cfgProvider: cfgProvider, } var rdr io.Reader if tc.body != "" { @@ -953,9 +976,12 @@ func TestMultitenantCompactor_UploadBlockFile(t *testing.T) { t.Run(tc.name, func(t *testing.T) { bkt := objstore.NewInMemBucket() tc.setUpBucket(t, bkt) + cfgProvider := 
newMockConfigProvider() + cfgProvider.blockUploadEnabled[tenantID] = true c := &MultitenantCompactor{ logger: log.NewNopLogger(), bucketClient: bkt, + cfgProvider: cfgProvider, } for _, f := range tc.files { @@ -1027,6 +1053,7 @@ func TestMultitenantCompactor_HandleBlockUpload_Complete(t *testing.T) { name string tenantID string blockID string + disableBlockUpload bool expMeta metadata.Meta expBadRequest string expConflict string @@ -1051,6 +1078,13 @@ func TestMultitenantCompactor_HandleBlockUpload_Complete(t *testing.T) { blockID: "1234", expBadRequest: "invalid block ID", }, + { + name: "block upload disabled", + tenantID: tenantID, + blockID: blockID, + disableBlockUpload: true, + expBadRequest: "block upload is disabled", + }, { name: "complete block already exists", tenantID: tenantID, @@ -1142,9 +1176,12 @@ func TestMultitenantCompactor_HandleBlockUpload_Complete(t *testing.T) { if tc.setUpBucketMock != nil { tc.setUpBucketMock(&bkt) } + cfgProvider := newMockConfigProvider() + cfgProvider.blockUploadEnabled[tc.tenantID] = !tc.disableBlockUpload c := &MultitenantCompactor{ logger: log.NewNopLogger(), bucketClient: &bkt, + cfgProvider: cfgProvider, } r := httptest.NewRequest(http.MethodPost, fmt.Sprintf( "/api/v1/upload/block/%s?uploadComplete=true", tc.blockID), nil) diff --git a/pkg/compactor/blocks_cleaner_test.go b/pkg/compactor/blocks_cleaner_test.go index 3cb0d0c344..1505926d7e 100644 --- a/pkg/compactor/blocks_cleaner_test.go +++ b/pkg/compactor/blocks_cleaner_test.go @@ -699,6 +699,7 @@ type mockConfigProvider struct { splitAndMergeShards map[string]int instancesShardSize map[string]int splitGroups map[string]int + blockUploadEnabled map[string]bool } func newMockConfigProvider() *mockConfigProvider { @@ -706,6 +707,7 @@ func newMockConfigProvider() *mockConfigProvider { userRetentionPeriods: make(map[string]time.Duration), splitAndMergeShards: make(map[string]int), splitGroups: make(map[string]int), + blockUploadEnabled: make(map[string]bool), } } @@ -737,6 +739,10 @@ func (m *mockConfigProvider) CompactorTenantShardSize(user string) int { return 0 } +func (m *mockConfigProvider) CompactorBlockUploadEnabled(tenantID string) bool { + return m.blockUploadEnabled[tenantID] +} + func (m *mockConfigProvider) S3SSEType(user string) string { return "" } diff --git a/pkg/compactor/compactor.go b/pkg/compactor/compactor.go index 58c25e8715..a2f77696ee 100644 --- a/pkg/compactor/compactor.go +++ b/pkg/compactor/compactor.go @@ -192,6 +192,9 @@ type ConfigProvider interface { // CompactorTenantShardSize returns number of compactors that this user can use. 0 = all compactors. CompactorTenantShardSize(userID string) int + + // CompactorBlockUploadEnabled returns whether block upload is enabled for a given tenant. + CompactorBlockUploadEnabled(tenantID string) bool } // MultitenantCompactor is a multi-tenant TSDB blocks compactor based on Thanos. 
diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index c2c6f69db0..c5d35678e7 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -127,6 +127,7 @@ type Limits struct { CompactorSplitAndMergeShards int `yaml:"compactor_split_and_merge_shards" json:"compactor_split_and_merge_shards"` CompactorSplitGroups int `yaml:"compactor_split_groups" json:"compactor_split_groups"` CompactorTenantShardSize int `yaml:"compactor_tenant_shard_size" json:"compactor_tenant_shard_size"` + CompactorBlockUploadEnabled bool `yaml:"compactor_block_upload_enabled" json:"compactor_block_upload_enabled"` // This config doesn't have a CLI flag registered here because they're registered in // their own original config struct. @@ -204,6 +205,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.CompactorSplitAndMergeShards, "compactor.split-and-merge-shards", 0, "The number of shards to use when splitting blocks. 0 to disable splitting.") f.IntVar(&l.CompactorSplitGroups, "compactor.split-groups", 1, "Number of groups that blocks for splitting should be grouped into. Each group of blocks is then split separately. Number of output split shards is controlled by -compactor.split-and-merge-shards.") f.IntVar(&l.CompactorTenantShardSize, "compactor.compactor-tenant-shard-size", 0, "Max number of compactors that can compact blocks for single tenant. 0 to disable the limit and use all compactors.") + f.BoolVar(&l.CompactorBlockUploadEnabled, "compactor.block-upload-enabled", false, "Enable block upload API for the tenant.") // Store-gateway. f.IntVar(&l.StoreGatewayTenantShardSize, "store-gateway.tenant-shard-size", 0, "The tenant's shard size, used when store-gateway sharding is enabled. Value of 0 disables shuffle sharding for the tenant, that is all tenant blocks are sharded across all store-gateway replicas.") @@ -529,6 +531,11 @@ func (o *Overrides) CompactorSplitGroups(userID string) int { return o.getOverridesForUser(userID).CompactorSplitGroups } +// CompactorBlockUploadEnabled returns whether block upload is enabled for a certain tenant. +func (o *Overrides) CompactorBlockUploadEnabled(tenantID string) bool { + return o.getOverridesForUser(tenantID).CompactorBlockUploadEnabled +} + // MetricRelabelConfigs returns the metric relabel configs for a given user. func (o *Overrides) MetricRelabelConfigs(userID string) []*relabel.Config { return o.getOverridesForUser(userID).MetricRelabelConfigs From e587fe20ae356e55f39b45afbe09831403d43200 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 20 Jun 2022 16:46:00 +0200 Subject: [PATCH 06/63] Helm: weekly release (#2145) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Helm: weekly release Manual for now, should be automated. 
Signed-off-by: György Krajcsovits --- operations/helm/charts/mimir-distributed/Chart.yaml | 2 +- operations/helm/charts/mimir-distributed/README.md | 2 +- operations/helm/charts/mimir-distributed/values.yaml | 4 ++-- .../mimir-distributed/templates/admin-api/admin-api-dep.yaml | 2 +- .../templates/alertmanager/alertmanager-statefulset.yaml | 2 +- .../templates/compactor/compactor-statefulset.yaml | 2 +- .../templates/distributor/distributor-dep.yaml | 2 +- .../mimir-distributed/templates/gateway/gateway-dep.yaml | 2 +- .../templates/ingester/ingester-statefulset.yaml | 2 +- .../templates/overrides-exporter/overrides-exporter-dep.yaml | 2 +- .../mimir-distributed/templates/querier/querier-dep.yaml | 2 +- .../templates/query-frontend/query-frontend-dep.yaml | 2 +- .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 +- .../templates/store-gateway/store-gateway-statefulset.yaml | 2 +- .../mimir-distributed/templates/tokengen/tokengen-job.yaml | 2 +- .../mimir-distributed/templates/admin-api/admin-api-dep.yaml | 2 +- .../templates/alertmanager/alertmanager-statefulset.yaml | 2 +- .../templates/compactor/compactor-statefulset.yaml | 2 +- .../templates/distributor/distributor-dep.yaml | 2 +- .../mimir-distributed/templates/gateway/gateway-dep.yaml | 2 +- .../templates/ingester/ingester-statefulset.yaml | 2 +- .../templates/overrides-exporter/overrides-exporter-dep.yaml | 2 +- .../mimir-distributed/templates/querier/querier-dep.yaml | 2 +- .../templates/query-frontend/query-frontend-dep.yaml | 2 +- .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 +- .../templates/store-gateway/store-gateway-statefulset.yaml | 2 +- .../mimir-distributed/templates/tokengen/tokengen-job.yaml | 2 +- .../templates/alertmanager/alertmanager-statefulset.yaml | 2 +- .../templates/compactor/compactor-statefulset.yaml | 2 +- .../templates/distributor/distributor-dep.yaml | 2 +- .../templates/ingester/ingester-statefulset.yaml | 2 +- .../templates/overrides-exporter/overrides-exporter-dep.yaml | 2 +- .../mimir-distributed/templates/querier/querier-dep.yaml | 2 +- .../templates/query-frontend/query-frontend-dep.yaml | 2 +- .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 +- .../templates/store-gateway/store-gateway-statefulset.yaml | 2 +- 36 files changed, 37 insertions(+), 37 deletions(-) diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml index c053243d50..a5d4913ee9 100644 --- a/operations/helm/charts/mimir-distributed/Chart.yaml +++ b/operations/helm/charts/mimir-distributed/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 2.2.0-weekly.190 +version: 2.2.0-weekly.191 appVersion: 2.1.0 description: "Grafana Mimir" engine: gotpl diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index 2dddd53de7..d406d51057 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -4,7 +4,7 @@ Helm chart for deploying [Grafana Mimir](https://grafana.com/docs/mimir/v2.1.x/) # mimir-distributed -![Version: 2.2.0-weekly.190](https://img.shields.io/badge/Version-2.2.0--weekly.190-informational?style=flat-square) ![AppVersion: 2.1.0](https://img.shields.io/badge/AppVersion-2.1.0-informational?style=flat-square) +![Version: 2.2.0-weekly.191](https://img.shields.io/badge/Version-2.2.0--weekly.191-informational?style=flat-square) ![AppVersion: 2.1.0](https://img.shields.io/badge/AppVersion-2.1.0-informational?style=flat-square) 
Grafana Mimir diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index 3923863486..81a1df787d 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -16,7 +16,7 @@ fullnameOverride: null # Since the image is unique for all microservices, so are image settings. image: repository: grafana/mimir - tag: r190-fe20bbd + tag: r191-e11ac85 pullPolicy: IfNotPresent # Optionally specify an array of imagePullSecrets. # Secrets must be manually created in the namespace. @@ -1450,7 +1450,7 @@ enterprise: # Container image settings for enterprise, note that pullPolicy and pullSecrets are set in top level .image image: repository: grafana/enterprise-metrics - tag: r190-9abd21a8 + tag: r191-708e13a1 # In order to use Grafana Enterprise Metrics features, you will need to provide the contents of your Grafana Enterprise Metrics # license, either by providing the contents of the license.jwt, or the name Kubernetes Secret that contains your license.jwt. diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml index 49b4e4c73b..4c0fc31a74 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -43,7 +43,7 @@ spec: initContainers: containers: - name: admin-api - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=admin-api" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 11df57ab60..d7ff7315b0 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -66,7 +66,7 @@ spec: emptyDir: {} containers: - name: alertmanager - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=alertmanager" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index 52548d614b..f0efad3943 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -74,7 +74,7 @@ spec: emptyDir: {} containers: - name: compactor - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=compactor" diff --git 
a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 9ea8728909..72554ca5d3 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -44,7 +44,7 @@ spec: [] containers: - name: distributor - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=distributor" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml index 83a97f3c64..8a07cfd9cb 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -42,7 +42,7 @@ spec: [] containers: - name: gateway - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=gateway" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 9c92d6ff1e..6f4f3b802a 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -65,7 +65,7 @@ spec: emptyDir: {} containers: - name: ingester - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ingester" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 6dcd6bf91f..8e78a79e52 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -42,7 +42,7 @@ spec: [] containers: - name: overrides-exporter - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=overrides-exporter" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 87df168585..7909970e25 100644 --- 
a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -44,7 +44,7 @@ spec: [] containers: - name: querier - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=querier" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index e5d1190408..1174e3c776 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -42,7 +42,7 @@ spec: [] containers: - name: query-frontend - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=query-frontend" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index cf627a4844..96579ce9ed 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -44,7 +44,7 @@ spec: [] containers: - name: ruler - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ruler" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index d9b00ea821..6fff00f4ba 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -72,7 +72,7 @@ spec: emptyDir: {} containers: - name: store-gateway - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=store-gateway" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml index 1de1939313..210e671516 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -33,7 +33,7 @@ spec: [] containers: - name: tokengen - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: 
"grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=tokengen" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml index 3c71ded98a..b613a4d8ff 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -42,7 +42,7 @@ spec: initContainers: containers: - name: admin-api - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=admin-api" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index cc539b3d0b..68f13819e5 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -62,7 +62,7 @@ spec: emptyDir: {} containers: - name: alertmanager - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=alertmanager" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index 108ef09ea4..f522fe28d3 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -70,7 +70,7 @@ spec: emptyDir: {} containers: - name: compactor - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=compactor" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 449bb802b3..8d83a8eabb 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -43,7 +43,7 @@ spec: [] containers: - name: distributor - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=distributor" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml index 53a12f62e6..62bc5c3e8e 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml +++ 
b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -41,7 +41,7 @@ spec: [] containers: - name: gateway - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=gateway" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index afa645b115..04f4edf5b3 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -61,7 +61,7 @@ spec: emptyDir: {} containers: - name: ingester - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ingester" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index db2c57beae..3d455ef79e 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -41,7 +41,7 @@ spec: [] containers: - name: overrides-exporter - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=overrides-exporter" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 3cc531a18f..2963627548 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -43,7 +43,7 @@ spec: [] containers: - name: querier - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=querier" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index 74d11b0de0..65f6d5bee2 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -41,7 +41,7 @@ spec: [] containers: - name: query-frontend - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=query-frontend" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml 
b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 664ee74203..7c9fb5e6ae 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -43,7 +43,7 @@ spec: [] containers: - name: ruler - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ruler" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 76c1289591..ca17ca3deb 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -68,7 +68,7 @@ spec: emptyDir: {} containers: - name: store-gateway - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=store-gateway" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml index 1061da255e..40e081f17c 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -33,7 +33,7 @@ spec: [] containers: - name: tokengen - image: "grafana/enterprise-metrics:r190-9abd21a8" + image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=tokengen" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 58a1f035ab..a0eb9b09e3 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -63,7 +63,7 @@ spec: emptyDir: {} containers: - name: alertmanager - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=alertmanager" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index 97c0038187..cb07c0d00e 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -71,7 +71,7 @@ spec: emptyDir: {} containers: - name: compactor - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - 
"-target=compactor" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 70482b0b57..0d623bf99f 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -44,7 +44,7 @@ spec: [] containers: - name: distributor - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=distributor" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index afdd4a22bf..ebd5550e6f 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -62,7 +62,7 @@ spec: emptyDir: {} containers: - name: ingester - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=ingester" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 27c796ef84..98ebd1fe7f 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -42,7 +42,7 @@ spec: [] containers: - name: overrides-exporter - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=overrides-exporter" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 051f5e6758..916766991b 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -44,7 +44,7 @@ spec: [] containers: - name: querier - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=querier" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index b532727436..ca935cd634 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -42,7 +42,7 @@ spec: [] containers: - name: query-frontend - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - 
"-target=query-frontend" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 1ba4f56e5e..bbdea3b80c 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -44,7 +44,7 @@ spec: [] containers: - name: ruler - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=ruler" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index c4f1773a27..7190c0404a 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -69,7 +69,7 @@ spec: emptyDir: {} containers: - name: store-gateway - image: "grafana/mimir:r190-fe20bbd" + image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=store-gateway" From 6011ecebc11ed020405bcefac246fcf28a045857 Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Mon, 20 Jun 2022 17:54:02 +0200 Subject: [PATCH 07/63] Fix error message typo (#2151) Signed-off-by: Oleg Zaytsev --- pkg/ruler/api.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/ruler/api.go b/pkg/ruler/api.go index 4aa0e3a722..8b6590b5c5 100644 --- a/pkg/ruler/api.go +++ b/pkg/ruler/api.go @@ -295,7 +295,7 @@ var ( // ErrNoRuleGroups signals the rule group requested does not exist ErrNoRuleGroups = errors.New("no rule groups found") // ErrBadRuleGroup is returned when the provided rule group can not be unmarshalled - ErrBadRuleGroup = errors.New("unable to decoded rule group") + ErrBadRuleGroup = errors.New("unable to decode rule group") ) func marshalAndSend(output interface{}, w http.ResponseWriter, logger log.Logger) { From 98a4d0966c2c71a9da5de7e73c39547fa05dee80 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Mon, 20 Jun 2022 17:59:53 +0200 Subject: [PATCH 08/63] Helm: prune image reference from golden record (#2149) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This changes with weekly release as well. 
Signed-off-by: György Krajcsovits --- operations/helm/tests/build.sh | 5 +++-- .../mimir-distributed/templates/admin-api/admin-api-dep.yaml | 1 - .../templates/alertmanager/alertmanager-statefulset.yaml | 1 - .../templates/compactor/compactor-statefulset.yaml | 1 - .../templates/distributor/distributor-dep.yaml | 1 - .../mimir-distributed/templates/gateway/gateway-dep.yaml | 1 - .../templates/ingester/ingester-statefulset.yaml | 1 - .../templates/overrides-exporter/overrides-exporter-dep.yaml | 1 - .../mimir-distributed/templates/querier/querier-dep.yaml | 1 - .../templates/query-frontend/query-frontend-dep.yaml | 1 - .../mimir-distributed/templates/ruler/ruler-dep.yaml | 1 - .../templates/store-gateway/store-gateway-statefulset.yaml | 1 - .../mimir-distributed/templates/tokengen/tokengen-job.yaml | 1 - .../mimir-distributed/templates/admin-api/admin-api-dep.yaml | 1 - .../templates/alertmanager/alertmanager-statefulset.yaml | 1 - .../templates/compactor/compactor-statefulset.yaml | 1 - .../templates/distributor/distributor-dep.yaml | 1 - .../mimir-distributed/templates/gateway/gateway-dep.yaml | 1 - .../templates/ingester/ingester-statefulset.yaml | 1 - .../templates/overrides-exporter/overrides-exporter-dep.yaml | 1 - .../mimir-distributed/templates/querier/querier-dep.yaml | 1 - .../templates/query-frontend/query-frontend-dep.yaml | 1 - .../mimir-distributed/templates/ruler/ruler-dep.yaml | 1 - .../templates/store-gateway/store-gateway-statefulset.yaml | 1 - .../mimir-distributed/templates/tokengen/tokengen-job.yaml | 1 - .../templates/alertmanager/alertmanager-statefulset.yaml | 1 - .../templates/compactor/compactor-statefulset.yaml | 1 - .../templates/distributor/distributor-dep.yaml | 1 - .../templates/ingester/ingester-statefulset.yaml | 1 - .../templates/overrides-exporter/overrides-exporter-dep.yaml | 1 - .../mimir-distributed/templates/querier/querier-dep.yaml | 1 - .../templates/query-frontend/query-frontend-dep.yaml | 1 - .../mimir-distributed/templates/ruler/ruler-dep.yaml | 1 - .../templates/store-gateway/store-gateway-statefulset.yaml | 1 - 34 files changed, 3 insertions(+), 35 deletions(-) diff --git a/operations/helm/tests/build.sh b/operations/helm/tests/build.sh index 8686b963f4..d3f8920b33 100755 --- a/operations/helm/tests/build.sh +++ b/operations/helm/tests/build.sh @@ -22,6 +22,7 @@ for FILEPATH in $TESTS; do echo "Templating $TEST_NAME" helm template "${TEST_NAME}" ${CHART_PATH} -f "${FILEPATH}" --output-dir "${OUTPUT_DIR}" --namespace citestns - echo "Removing mutable config checksum and helm chart version for clarity" - find "${OUTPUT_DIR}/$(basename ${CHART_PATH})/templates" -type f -print0 | xargs -0 sed -E -i -- "/^[ ]+(checksum\/config|(helm.sh\/)?chart):/d" + echo "Removing mutable config checksum, helm chart, image tag version for clarity" + find "${OUTPUT_DIR}/$(basename ${CHART_PATH})/templates" -type f -print0 | xargs -0 sed -E -i -- "/^\s+(checksum\/config|(helm.sh\/)?chart|image: \"grafana\/(mimir|enterprise-metrics)):/d" + done diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml index 4c0fc31a74..21feec5668 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ 
b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -43,7 +43,6 @@ spec: initContainers: containers: - name: admin-api - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=admin-api" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index d7ff7315b0..9f2548aff5 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -66,7 +66,6 @@ spec: emptyDir: {} containers: - name: alertmanager - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=alertmanager" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index f0efad3943..57e2992f1e 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -74,7 +74,6 @@ spec: emptyDir: {} containers: - name: compactor - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=compactor" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 72554ca5d3..79b511bba0 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -44,7 +44,6 @@ spec: [] containers: - name: distributor - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=distributor" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml index 8a07cfd9cb..45333ba0ab 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -42,7 +42,6 @@ spec: [] containers: - name: gateway - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=gateway" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml 
b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 6f4f3b802a..be76101b94 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -65,7 +65,6 @@ spec: emptyDir: {} containers: - name: ingester - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ingester" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 8e78a79e52..c5f53ed98e 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -42,7 +42,6 @@ spec: [] containers: - name: overrides-exporter - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=overrides-exporter" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 7909970e25..11251349d1 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -44,7 +44,6 @@ spec: [] containers: - name: querier - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=querier" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index 1174e3c776..cc09f7ae2b 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -42,7 +42,6 @@ spec: [] containers: - name: query-frontend - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=query-frontend" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 96579ce9ed..d36f1615e0 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -44,7 +44,6 @@ spec: [] containers: - name: ruler - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - 
"-target=ruler" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 6fff00f4ba..a53b6ea521 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -72,7 +72,6 @@ spec: emptyDir: {} containers: - name: store-gateway - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=store-gateway" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml index 210e671516..503cdf6445 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -33,7 +33,6 @@ spec: [] containers: - name: tokengen - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=tokengen" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml index b613a4d8ff..6eec839fae 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -42,7 +42,6 @@ spec: initContainers: containers: - name: admin-api - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=admin-api" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 68f13819e5..64c5225d58 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -62,7 +62,6 @@ spec: emptyDir: {} containers: - name: alertmanager - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=alertmanager" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index f522fe28d3..8cb4eca2f0 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -70,7 +70,6 @@ spec: emptyDir: {} 
containers: - name: compactor - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=compactor" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 8d83a8eabb..7abcb00136 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -43,7 +43,6 @@ spec: [] containers: - name: distributor - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=distributor" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml index 62bc5c3e8e..3ad9f8ebf6 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -41,7 +41,6 @@ spec: [] containers: - name: gateway - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=gateway" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 04f4edf5b3..0416de3aed 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -61,7 +61,6 @@ spec: emptyDir: {} containers: - name: ingester - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ingester" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 3d455ef79e..ec56f13de7 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -41,7 +41,6 @@ spec: [] containers: - name: overrides-exporter - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=overrides-exporter" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 2963627548..743d155cad 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -43,7 +43,6 @@ spec: [] containers: - name: querier - image: 
"grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=querier" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index 65f6d5bee2..f6e68eb670 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -41,7 +41,6 @@ spec: [] containers: - name: query-frontend - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=query-frontend" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 7c9fb5e6ae..72ea32258d 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -43,7 +43,6 @@ spec: [] containers: - name: ruler - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=ruler" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index ca17ca3deb..0622da8e20 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -68,7 +68,6 @@ spec: emptyDir: {} containers: - name: store-gateway - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=store-gateway" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml index 40e081f17c..d01f9abc26 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -33,7 +33,6 @@ spec: [] containers: - name: tokengen - image: "grafana/enterprise-metrics:r191-708e13a1" imagePullPolicy: IfNotPresent args: - "-target=tokengen" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index a0eb9b09e3..38bab765b8 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -63,7 +63,6 @@ spec: emptyDir: {} containers: - name: alertmanager - image: "grafana/mimir:r191-e11ac85" 
imagePullPolicy: IfNotPresent args: - "-target=alertmanager" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index cb07c0d00e..9c3e073b88 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -71,7 +71,6 @@ spec: emptyDir: {} containers: - name: compactor - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=compactor" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 0d623bf99f..cd5cc73e68 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -44,7 +44,6 @@ spec: [] containers: - name: distributor - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=distributor" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index ebd5550e6f..8889b57812 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -62,7 +62,6 @@ spec: emptyDir: {} containers: - name: ingester - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=ingester" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 98ebd1fe7f..597e29c8f2 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -42,7 +42,6 @@ spec: [] containers: - name: overrides-exporter - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=overrides-exporter" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 916766991b..c3324c49c0 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -44,7 +44,6 @@ spec: [] containers: - name: querier - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=querier" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml 
b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index ca935cd634..eb1fb0ddf6 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -42,7 +42,6 @@ spec: [] containers: - name: query-frontend - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=query-frontend" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index bbdea3b80c..612db7586e 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -44,7 +44,6 @@ spec: [] containers: - name: ruler - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=ruler" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 7190c0404a..792f1c93b3 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -69,7 +69,6 @@ spec: emptyDir: {} containers: - name: store-gateway - image: "grafana/mimir:r191-e11ac85" imagePullPolicy: IfNotPresent args: - "-target=store-gateway" From fa8bc0c046d1e2b3b44953d2699bfd60818f4a65 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Mon, 20 Jun 2022 18:12:11 +0200 Subject: [PATCH 09/63] Run blocks storage sanity check on compactor too at startup (#2144) Signed-off-by: Marco Pracucci --- CHANGELOG.md | 1 + pkg/mimir/sanity_check.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4adb2ac99f..ea5f958f46 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -31,6 +31,7 @@ * [ENHANCEMENT] Chunk Mapper: reduce memory usage of async chunk mapper. #2043 * [ENHANCEMENT] Ingesters: Added new configuration option that makes it possible for mimir ingesters to perform queries on overlapping blocks in the filesystem. Enabled with `-blocks-storage.tsdb.allow-overlapping-queries`. #2091 * [ENHANCEMENT] Ingester: reduce sleep time when reading WAL. #2098 +* [ENHANCEMENT] Compactor: Run sanity check on blocks storage configuration at startup. #2143 * [ENHANCEMENT] Compactor: Add HTTP API for uploading TSDB blocks. Enabled with `-compactor.block-upload-enabled`. #1694 #2126 * [BUGFIX] Fix regexp parsing panic for regexp label matchers with start/end quantifiers. #1883 * [BUGFIX] Ingester: fixed deceiving error log "failed to update cached shipped blocks after shipper initialisation", occurring for each new tenant in the ingester. 
#1893 diff --git a/pkg/mimir/sanity_check.go b/pkg/mimir/sanity_check.go index e27e10694a..3b529398dd 100644 --- a/pkg/mimir/sanity_check.go +++ b/pkg/mimir/sanity_check.go @@ -128,7 +128,7 @@ func checkObjectStoresConfig(ctx context.Context, cfg Config, logger log.Logger) errs := multierror.New() // Check blocks storage config only if running at least one component using it. - if cfg.isAnyModuleEnabled(All, Ingester, Querier, Ruler, StoreGateway) { + if cfg.isAnyModuleEnabled(All, Ingester, Querier, Ruler, StoreGateway, Compactor) { errs.Add(errors.Wrap(checkObjectStoreConfig(ctx, cfg.BlocksStorage.Bucket, logger), "blocks storage")) } From b9454da69c064ec8382964fc91dc18a20095f585 Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Mon, 20 Jun 2022 19:05:35 +0200 Subject: [PATCH 10/63] Docs: fix 'Monitoring Grafana Mimir' URL path (#2130) Signed-off-by: Marco Pracucci --- Makefile | 2 +- .../migration-guide/migrating-from-cortex.md | 8 ++++---- .../deploying-grafana-mimir/jsonnet/configuring.md | 2 +- .../_index.md | 2 ++ .../dashboards/_index.md | 2 ++ .../dashboards/alertmanager-resources/index.md | 2 ++ .../mimir-alertmanager-resources.png | Bin .../dashboards/alertmanager/index.md | 2 ++ .../dashboards/alertmanager/mimir-alertmanager.png | Bin .../dashboards/compactor-resources/index.md | 2 ++ .../mimir-compactor-resources.png | Bin .../dashboards/compactor/index.md | 2 ++ .../dashboards/compactor/mimir-compactor.png | Bin .../dashboards/config/index.md | 2 ++ .../dashboards/config/mimir-config.png | Bin .../dashboards/object-store/index.md | 2 ++ .../dashboards/object-store/mimir-object-store.png | Bin .../dashboards/overrides/index.md | 2 ++ .../dashboards/overrides/mimir-overrides.png | Bin .../dashboards/queries/index.md | 2 ++ .../dashboards/queries/mimir-queries.png | Bin .../dashboards/reads-networking/index.md | 2 ++ .../reads-networking/mimir-reads-networking.png | Bin .../dashboards/reads-resources/index.md | 2 ++ .../reads-resources/mimir-reads-resources.png | Bin .../dashboards/reads/index.md | 2 ++ .../dashboards/reads/mimir-reads.png | Bin .../remote-ruler-reads-resources/index.md | 2 ++ .../mimir-remote-ruler-reads-resources.png | Bin .../dashboards/remote-ruler-reads/index.md | 2 ++ .../remote-ruler-reads/mimir-remote-ruler-reads.png | Bin .../dashboards/rollout-progress/index.md | 2 ++ .../rollout-progress/mimir-rollout-progress.png | Bin .../dashboards/ruler/index.md | 2 ++ .../dashboards/ruler/mimir-ruler.png | Bin .../dashboards/scaling/index.md | 2 ++ .../dashboards/scaling/mimir-scaling.png | Bin .../dashboards/slow-queries/index.md | 2 ++ .../dashboards/tenants/index.md | 2 ++ .../dashboards/tenants/mimir-tenants.png | Bin .../dashboards/top-tenants/index.md | 2 ++ .../dashboards/writes-networking/index.md | 2 ++ .../writes-networking/mimir-writes-networking.png | Bin .../dashboards/writes-resources/index.md | 2 ++ .../writes-resources/mimir-writes-resources.png | Bin .../dashboards/writes/index.md | 2 ++ .../dashboards/writes/mimir-writes.png | Bin .../deploying-monitoring-mixin.md} | 2 ++ .../installing-dashboards-and-alerts.md | 2 ++ .../requirements.md | 2 ++ .../operators-guide/tools/mimir-continuous-test.md | 2 +- docs/sources/release-notes/v2.0.md | 2 +- operations/mimir-mixin-tools/screenshots/run.sh | 2 +- operations/mimir-mixin/config.libsonnet | 2 +- 54 files changed, 64 insertions(+), 10 deletions(-) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/_index.md (91%) rename 
docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/_index.md (85%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/alertmanager-resources/index.md (89%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/alertmanager-resources/mimir-alertmanager-resources.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/alertmanager/index.md (88%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/alertmanager/mimir-alertmanager.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/compactor-resources/index.md (89%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/compactor-resources/mimir-compactor-resources.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/compactor/index.md (88%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/compactor/mimir-compactor.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/config/index.md (87%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/config/mimir-config.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/object-store/index.md (88%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/object-store/mimir-object-store.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/overrides/index.md (86%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/overrides/mimir-overrides.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/queries/index.md (87%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/queries/mimir-queries.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/reads-networking/index.md (91%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/reads-networking/mimir-reads-networking.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/reads-resources/index.md (91%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/reads-resources/mimir-reads-resources.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/reads/index.md (90%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/reads/mimir-reads.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/remote-ruler-reads-resources/index.md (91%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/remote-ruler-reads/index.md (90%) rename docs/sources/operators-guide/{visualizing-metrics => 
monitoring-grafana-mimir}/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/rollout-progress/index.md (88%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/rollout-progress/mimir-rollout-progress.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/ruler/index.md (88%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/ruler/mimir-ruler.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/scaling/index.md (88%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/scaling/mimir-scaling.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/slow-queries/index.md (90%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/tenants/index.md (86%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/tenants/mimir-tenants.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/top-tenants/index.md (81%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/writes-networking/index.md (91%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/writes-networking/mimir-writes-networking.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/writes-resources/index.md (91%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/writes-resources/mimir-writes-resources.png (100%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/writes/index.md (90%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/dashboards/writes/mimir-writes.png (100%) rename docs/sources/operators-guide/{visualizing-metrics/deploying-monitor-mixin.md => monitoring-grafana-mimir/deploying-monitoring-mixin.md} (96%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/installing-dashboards-and-alerts.md (98%) rename docs/sources/operators-guide/{visualizing-metrics => monitoring-grafana-mimir}/requirements.md (99%) diff --git a/Makefile b/Makefile index f80ef41b5c..822f71d5ae 100644 --- a/Makefile +++ b/Makefile @@ -476,7 +476,7 @@ mixin-serve: ## Runs Grafana (listening on port 3000) loading the mixin dashboar @./operations/mimir-mixin-tools/serve/run.sh mixin-screenshots: ## Generates mixin dashboards screenshots. - @find docs/sources/operators-guide/visualizing-metrics/dashboards -name '*.png' -delete + @find docs/sources/operators-guide/monitoring-grafana-mimir/dashboards -name '*.png' -delete @./operations/mimir-mixin-tools/screenshots/run.sh check-jsonnet-manifests: format-jsonnet-manifests diff --git a/docs/sources/migration-guide/migrating-from-cortex.md b/docs/sources/migration-guide/migrating-from-cortex.md index ea69f087df..fffee04e3d 100644 --- a/docs/sources/migration-guide/migrating-from-cortex.md +++ b/docs/sources/migration-guide/migrating-from-cortex.md @@ -155,7 +155,7 @@ jb install github.com/grafana/mimir/operations/mimir-mixin@main a. Add the dashboards to Grafana. 
The dashboards replace your Cortex dashboards and continue to work for monitoring Cortex deployments. > **Note:** Resource dashboards are now enabled by default and require additional metrics sources. - > To understand the required metrics sources, refer to [Additional resources metrics]({{< relref "../operators-guide/visualizing-metrics/requirements.md#additional-resources-metrics" >}}). + > To understand the required metrics sources, refer to [Additional resources metrics]({{< relref "../operators-guide/monitoring-grafana-mimir/requirements.md#additional-resources-metrics" >}}). b. Install the recording and alerting rules into the ruler or a Prometheus server. @@ -180,7 +180,7 @@ jb install github.com/grafana/mimir/operations/mimir-mixin@main To extract the flags for each component, refer to [Extracting flags from Jsonnet]({{< relref "../operators-guide/tools/mimirtool.md#extracting-flags-from-jsonnet" >}}). 1. Apply the updated Jsonnet -To verify that the cluster is operating correctly, use the [monitoring mixin dashboards]({{< relref "../operators-guide/visualizing-metrics/dashboards/_index.md" >}}). +To verify that the cluster is operating correctly, use the [monitoring mixin dashboards]({{< relref "../operators-guide/monitoring-grafana-mimir/dashboards/_index.md" >}}). ## Migrating to Grafana Mimir using Helm @@ -206,7 +206,7 @@ You can update to the Grafana Mimir Helm chart from the Cortex Helm chart. a. Add the dashboards to Grafana. The dashboards replace your Cortex dashboards and continue to work for monitoring Cortex deployments. > **Note:** Resource dashboards are now enabled by default and require additional metrics sources. - > To understand the required metrics sources, refer to [Additional resources metrics]({{< relref "../operators-guide/visualizing-metrics/requirements.md#additional-resources-metrics" >}}). + > To understand the required metrics sources, refer to [Additional resources metrics]({{< relref "../operators-guide/monitoring-grafana-mimir/requirements.md#additional-resources-metrics" >}}). b. Install the recording and alerting rules into the ruler or a Prometheus server. @@ -334,4 +334,4 @@ You can update to the Grafana Mimir Helm chart from the Cortex Helm chart. helm upgrade grafana/mimir-distributed [-n ] ``` -To verify that the cluster is operating correctly, use the [monitoring mixin dashboards]({{< relref "../operators-guide/visualizing-metrics/dashboards/_index.md" >}}). +To verify that the cluster is operating correctly, use the [monitoring mixin dashboards]({{< relref "../operators-guide/monitoring-grafana-mimir/dashboards/_index.md" >}}). diff --git a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md index 0ca0f66aa4..e40dffa95e 100644 --- a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md +++ b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md @@ -40,7 +40,7 @@ mimir { Default scaling of Mimir components in the provided Jsonnet is opinionated and based on engineers’ years of experience running it at Grafana Labs. The default resource requests and limits are also fine-tuned for the provided alerting rules. -For more information, see [Monitoring Grafana Mimir]({{< relref "../../visualizing-metrics/_index.md" >}}). +For more information, see [Monitoring Grafana Mimir]({{< relref "../../monitoring-grafana-mimir/_index.md" >}}). 
However, there are use cases where you might want to change the default resource requests, their limits, or both. For example, if you are just testing Mimir and you want to run it on a small (possibly one-node) Kubernetes cluster, and you do not have tens of gigabytes of memory or multiple cores to schedule the components, consider overriding the scaling requirements as follows: diff --git a/docs/sources/operators-guide/visualizing-metrics/_index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/_index.md similarity index 91% rename from docs/sources/operators-guide/visualizing-metrics/_index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/_index.md index 101e050198..5e4e7d0bdf 100644 --- a/docs/sources/operators-guide/visualizing-metrics/_index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/_index.md @@ -5,6 +5,8 @@ description: "View example Grafana Mimir dashboards." weight: 50 keywords: - Mimir dashboards +aliases: + - visualizing-metrics/ --- # Monitoring Grafana Mimir diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/_index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/_index.md similarity index 85% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/_index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/_index.md index 867b07108f..bf86a298bf 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/_index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/_index.md @@ -3,6 +3,8 @@ title: "Viewing Grafana Mimir dashboards" menuTitle: "Viewing dashboards" description: "View examples of production-ready Grafana Mimir dashboards." weight: 40 +aliases: + - ../visualizing-metrics/dashboards/ --- # Viewing Grafana Mimir dashboards diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager-resources/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager-resources/index.md similarity index 89% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager-resources/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager-resources/index.md index 122e76494b..c40242b1ac 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager-resources/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager-resources/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Alertmanager resources dashboards" menuTitle: "Alertmanager resources" description: "View an example Alertmanager resources dashboard." 
weight: 20 +aliases: + - ../../visualizing-metrics/dashboards/alertmanager-resources/ --- # Grafana Mimir Alertmanager resources dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager-resources/mimir-alertmanager-resources.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager-resources/mimir-alertmanager-resources.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager-resources/mimir-alertmanager-resources.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager-resources/mimir-alertmanager-resources.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager/index.md similarity index 88% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager/index.md index 972bd34b58..f3b282705c 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Alertmanager dashboard" menuTitle: "Alertmanager" description: "View an example Alertmanager dashboard." weight: 10 +aliases: + - ../../visualizing-metrics/dashboards/alertmanager/ --- # Grafana Mimir Alertmanager dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager/mimir-alertmanager.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager/mimir-alertmanager.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/alertmanager/mimir-alertmanager.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/alertmanager/mimir-alertmanager.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/compactor-resources/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor-resources/index.md similarity index 89% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/compactor-resources/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor-resources/index.md index 382929f732..e849b90a10 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/compactor-resources/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor-resources/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Compactor resources dashboard" menuTitle: "Compactor resources" description: "View an example Compactor resources dashboard." 
weight: 40 +aliases: + - ../../visualizing-metrics/dashboards/compactor-resources/ --- # Grafana Mimir Compactor resources dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/compactor-resources/mimir-compactor-resources.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor-resources/mimir-compactor-resources.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/compactor-resources/mimir-compactor-resources.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor-resources/mimir-compactor-resources.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/compactor/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor/index.md similarity index 88% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/compactor/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor/index.md index 43e9c75402..729036b5e2 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/compactor/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Compactor dashboard" menuTitle: "Compactor" description: "View an example Compactor dashboard." weight: 30 +aliases: + - ../../visualizing-metrics/dashboards/compactor/ --- # Grafana Mimir Compactor dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/compactor/mimir-compactor.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor/mimir-compactor.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/compactor/mimir-compactor.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/compactor/mimir-compactor.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/config/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/config/index.md similarity index 87% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/config/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/config/index.md index 0eb1f77e66..afd28bfefa 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/config/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/config/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Config dashboard" menuTitle: "Config" description: "View an example Config dashboard." 
weight: 50 +aliases: + - ../../visualizing-metrics/dashboards/config/ --- # Grafana Mimir Config dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/config/mimir-config.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/config/mimir-config.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/config/mimir-config.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/config/mimir-config.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/object-store/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/object-store/index.md similarity index 88% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/object-store/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/object-store/index.md index fa28171472..af79913069 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/object-store/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/object-store/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Object Store dashboard" menuTitle: "Object Store" description: "View an example Object Store dashboard." weight: 60 +aliases: + - ../../visualizing-metrics/dashboards/object-store/ --- # Grafana Mimir Object Store dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/object-store/mimir-object-store.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/object-store/mimir-object-store.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/object-store/mimir-object-store.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/object-store/mimir-object-store.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/overrides/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/overrides/index.md similarity index 86% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/overrides/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/overrides/index.md index 9e1737cc3b..8685de0dc3 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/overrides/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/overrides/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Overrides dashboard" menuTitle: "Overrides" description: "View an example Overrides dashboard." 
weight: 70 +aliases: + - ../../visualizing-metrics/dashboards/overrides/ --- # Grafana Mimir Overrides dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/overrides/mimir-overrides.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/overrides/mimir-overrides.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/overrides/mimir-overrides.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/overrides/mimir-overrides.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/queries/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/queries/index.md similarity index 87% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/queries/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/queries/index.md index 8d0ba876f4..ee45b4ecd7 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/queries/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/queries/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Queries dashboard" menuTitle: "Queries" description: "View an example Queries dashboard." weight: 80 +aliases: + - ../../visualizing-metrics/dashboards/queries/ --- # Grafana Mimir Queries dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/queries/mimir-queries.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/queries/mimir-queries.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/queries/mimir-queries.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/queries/mimir-queries.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads-networking/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-networking/index.md similarity index 91% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/reads-networking/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-networking/index.md index d2158f4133..4399105dd1 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads-networking/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-networking/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Reads networking dashboard" menuTitle: "Reads networking" description: "View an example Reads networking dashboard." 
weight: 100 +aliases: + - ../../visualizing-metrics/dashboards/reads-networking/ --- # Grafana Mimir Reads networking dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads-networking/mimir-reads-networking.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-networking/mimir-reads-networking.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/reads-networking/mimir-reads-networking.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-networking/mimir-reads-networking.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads-resources/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-resources/index.md similarity index 91% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/reads-resources/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-resources/index.md index d6388eab40..89c1314b6d 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads-resources/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-resources/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Reads resources dashboard" menuTitle: "Reads resources" description: "View an example Reads resources dashboard." weight: 110 +aliases: + - ../../visualizing-metrics/dashboards/reads-resources/ --- # Grafana Mimir Reads resources dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads-resources/mimir-reads-resources.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-resources/mimir-reads-resources.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/reads-resources/mimir-reads-resources.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads-resources/mimir-reads-resources.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads/index.md similarity index 90% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/reads/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads/index.md index 3a0195ba23..0ea29f6a48 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Reads dashboard" menuTitle: "Reads" description: "View an example Reads dashboard." 
weight: 90 +aliases: + - ../../visualizing-metrics/dashboards/reads/ --- # Grafana Mimir Reads dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/reads/mimir-reads.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads/mimir-reads.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/reads/mimir-reads.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/reads/mimir-reads.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads-resources/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads-resources/index.md similarity index 91% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads-resources/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads-resources/index.md index facf573b9c..dd3e7f34c9 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads-resources/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads-resources/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Remote ruler reads resources dashboard" menuTitle: "Remote ruler reads resources" description: "View an example Remote ruler reads resources dashboard." weight: 110 +aliases: + - ../../visualizing-metrics/dashboards/remote-ruler-reads-resources/ --- # Grafana Mimir Remote ruler reads resources dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads-resources/mimir-remote-ruler-reads-resources.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads/index.md similarity index 90% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads/index.md index dd2bed89e1..480d2b81f1 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Remote ruler reads dashboard" menuTitle: "Remote ruler reads" description: "View an example Remote ruler reads dashboard." 
weight: 90 +aliases: + - ../../visualizing-metrics/dashboards/remote-ruler-reads/ --- # Grafana Mimir Remote ruler reads dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/remote-ruler-reads/mimir-remote-ruler-reads.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/rollout-progress/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/rollout-progress/index.md similarity index 88% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/rollout-progress/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/rollout-progress/index.md index 6a36f00b08..f282a27f04 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/rollout-progress/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/rollout-progress/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Rollout progress dashboard" menuTitle: "Rollout progress" description: "View an example Rollout progress dashboard." weight: 120 +aliases: + - ../../visualizing-metrics/dashboards/rollout-progress/ --- # Grafana Mimir Rollout progress dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/rollout-progress/mimir-rollout-progress.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/rollout-progress/mimir-rollout-progress.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/rollout-progress/mimir-rollout-progress.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/rollout-progress/mimir-rollout-progress.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/ruler/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/ruler/index.md similarity index 88% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/ruler/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/ruler/index.md index ada5848242..12ab8f872b 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/ruler/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/ruler/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Ruler dashboard" menuTitle: "Ruler" description: "View an example Ruler dashboard." 
weight: 130 +aliases: + - ../../visualizing-metrics/dashboards/ruler/ --- # Grafana Mimir Ruler dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/ruler/mimir-ruler.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/ruler/mimir-ruler.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/ruler/mimir-ruler.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/ruler/mimir-ruler.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/scaling/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/scaling/index.md similarity index 88% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/scaling/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/scaling/index.md index 9c54231b62..2de2a59de8 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/scaling/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/scaling/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Scaling dashboard" menuTitle: "Scaling" description: "View an example Scaling dashboard." weight: 140 +aliases: + - ../../visualizing-metrics/dashboards/scaling/ --- # Grafana Mimir scaling dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/scaling/mimir-scaling.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/scaling/mimir-scaling.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/scaling/mimir-scaling.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/scaling/mimir-scaling.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/slow-queries/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/slow-queries/index.md similarity index 90% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/slow-queries/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/slow-queries/index.md index f0cc39dd50..44bbc03971 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/slow-queries/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/slow-queries/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Slow queries dashboard" menuTitle: "Slow queries" description: "Review a description of the Slow queries dashboard." weight: 150 +aliases: + - ../../visualizing-metrics/dashboards/slow-queries/ --- # Grafana Mimir Slow queries dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/tenants/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/tenants/index.md similarity index 86% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/tenants/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/tenants/index.md index bed7d6f451..51ad6f4872 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/tenants/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/tenants/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Tenants dashboard" menuTitle: "Tenants" description: "View an example Tenants dashboard." 
weight: 160 +aliases: + - ../../visualizing-metrics/dashboards/tenants/ --- # Grafana Mimir Tenants dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/tenants/mimir-tenants.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/tenants/mimir-tenants.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/tenants/mimir-tenants.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/tenants/mimir-tenants.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/top-tenants/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/top-tenants/index.md similarity index 81% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/top-tenants/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/top-tenants/index.md index 7dd9bb30c0..3ce659119e 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/top-tenants/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/top-tenants/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Top tenants dashboard" menuTitle: "Top tenants" description: "Review a description of the Top tenants dashboard." weight: 170 +aliases: + - ../../visualizing-metrics/dashboards/top-tenants/ --- # Grafana Mimir Top tenants dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes-networking/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-networking/index.md similarity index 91% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/writes-networking/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-networking/index.md index 7cb16bf7f1..c6b6652f3f 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes-networking/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-networking/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Writes networking dashboard" menuTitle: "Writes networking" description: "View an example Writes networking dashboard." 
weight: 190 +aliases: + - ../../visualizing-metrics/dashboards/writes-networking/ --- # Grafana Mimir Writes networking dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes-networking/mimir-writes-networking.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-networking/mimir-writes-networking.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/writes-networking/mimir-writes-networking.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-networking/mimir-writes-networking.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes-resources/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-resources/index.md similarity index 91% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/writes-resources/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-resources/index.md index a0908722c8..ee3456fe45 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes-resources/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-resources/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Writes resources dashboard" menuTitle: "Writes resources" description: "View an example Writes resources dashboard." weight: 200 +aliases: + - ../../visualizing-metrics/dashboards/writes-resources/ --- # Grafana Mimir Writes resources dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes-resources/mimir-writes-resources.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-resources/mimir-writes-resources.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/writes-resources/mimir-writes-resources.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes-resources/mimir-writes-resources.png diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes/index.md b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes/index.md similarity index 90% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/writes/index.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes/index.md index 9d9829953f..26ef61fc99 100644 --- a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes/index.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes/index.md @@ -3,6 +3,8 @@ title: "Grafana Mimir Writes dashboard" menuTitle: "Writes" description: "View an example Writes dashboard." 
weight: 180 +aliases: + - ../../visualizing-metrics/dashboards/writes/ --- # Grafana Mimir Writes dashboard diff --git a/docs/sources/operators-guide/visualizing-metrics/dashboards/writes/mimir-writes.png b/docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes/mimir-writes.png similarity index 100% rename from docs/sources/operators-guide/visualizing-metrics/dashboards/writes/mimir-writes.png rename to docs/sources/operators-guide/monitoring-grafana-mimir/dashboards/writes/mimir-writes.png diff --git a/docs/sources/operators-guide/visualizing-metrics/deploying-monitor-mixin.md b/docs/sources/operators-guide/monitoring-grafana-mimir/deploying-monitoring-mixin.md similarity index 96% rename from docs/sources/operators-guide/visualizing-metrics/deploying-monitor-mixin.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/deploying-monitoring-mixin.md index d1e1909d8e..4535d3642c 100644 --- a/docs/sources/operators-guide/visualizing-metrics/deploying-monitor-mixin.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/deploying-monitoring-mixin.md @@ -3,6 +3,8 @@ title: "Deploying the Grafana Mimir monitoring mixin" menuTitle: "Deploying the monitoring mixin" description: "Learn how to deploy the Grafana Mimir monitoring mixin." weight: 20 +aliases: + - ../visualizing-metrics/deploying-monitor-mixin/ --- # Deploying the Grafana Mimir monitoring mixin diff --git a/docs/sources/operators-guide/visualizing-metrics/installing-dashboards-and-alerts.md b/docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md similarity index 98% rename from docs/sources/operators-guide/visualizing-metrics/installing-dashboards-and-alerts.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md index 3b8e894790..37709e3e4c 100644 --- a/docs/sources/operators-guide/visualizing-metrics/installing-dashboards-and-alerts.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md @@ -3,6 +3,8 @@ title: "Installing Grafana Mimir dashboards and alerts" menuTitle: "Installing dashboards and alerts" description: "Learn how to install Grafana Mimir dashboards and alerts." weight: 30 +aliases: + - ../visualizing-metrics/installing-dashboards-and-alerts/ --- # Installing Grafana Mimir dashboards and alerts diff --git a/docs/sources/operators-guide/visualizing-metrics/requirements.md b/docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md similarity index 99% rename from docs/sources/operators-guide/visualizing-metrics/requirements.md rename to docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md index 3eb9def192..eebf614874 100644 --- a/docs/sources/operators-guide/visualizing-metrics/requirements.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md @@ -3,6 +3,8 @@ title: "About Grafana Mimir dashboards and alerts requirements" menuTitle: "About dashboards and alerts requirements" description: "Requirements for installing Grafana Mimir dashboards and alerts." 
weight: 10 +aliases: + - ../visualizing-metrics/requirements/ --- # About Grafana Mimir dashboards and alerts requirements diff --git a/docs/sources/operators-guide/tools/mimir-continuous-test.md b/docs/sources/operators-guide/tools/mimir-continuous-test.md index 0e50e87f12..f814ad2e63 100644 --- a/docs/sources/operators-guide/tools/mimir-continuous-test.md +++ b/docs/sources/operators-guide/tools/mimir-continuous-test.md @@ -82,5 +82,5 @@ mimir_continuous_test_query_result_checks_failed_total{test=""} ### Alerts -[Grafana Mimir alerts]({{< relref "../visualizing-metrics/installing-dashboards-and-alerts.md" >}}) include checks on failures that mimir-continuous-test tracks. +[Grafana Mimir alerts]({{< relref "../monitoring-grafana-mimir/installing-dashboards-and-alerts.md" >}}) include checks on failures that mimir-continuous-test tracks. When running mimir-continuous-test, use the provided alerts. diff --git a/docs/sources/release-notes/v2.0.md b/docs/sources/release-notes/v2.0.md index 973dcbc1b7..d467018ff2 100644 --- a/docs/sources/release-notes/v2.0.md +++ b/docs/sources/release-notes/v2.0.md @@ -37,7 +37,7 @@ These features and enhancements distinguish Grafana Mimir from Cortex 1.10.0: - We switched to `memberlist` as the default store for Grafana Mimir’s [hash rings]({{< relref "../operators-guide/architecture/hash-ring/index.md" >}}). With this, users no longer have to run Consul or etcd as external dependencies. We made performance optimizations to `memberlist` to reduce its CPU utilization, which ensures that `memberlist` runs smoothly on Grafana Mimir clusters with lots of active series. - - We’ve included our own internal best practice dashboards, mixins, and alerts for [monitoring Grafana Mimir]({{< relref "../operators-guide/visualizing-metrics" >}}). While installing monitoring best practices such as these has typically required use of Jsonnet, we’ve eliminated this requirement. We include dashboards as JSON and alerting and recording rules as YAML which can be directly imported into your Grafana and Prometheus deployments. The alerts are accompanied by [runbooks]({{< relref "../operators-guide/mimir-runbooks/_index.md" >}}) distilled from our own internal operations. + - We’ve included our own internal best practice dashboards, mixins, and alerts for [monitoring Grafana Mimir]({{< relref "../operators-guide/monitoring-grafana-mimir" >}}). While installing monitoring best practices such as these has typically required use of Jsonnet, we’ve eliminated this requirement. We include dashboards as JSON and alerting and recording rules as YAML which can be directly imported into your Grafana and Prometheus deployments. The alerts are accompanied by [runbooks]({{< relref "../operators-guide/mimir-runbooks/_index.md" >}}) distilled from our own internal operations. - **Configuration parameter reduction and classification**: We removed 36% of the configuration parameters in Grafana Mimir. All remaining configuration parameters have been classified as basic, advanced, or experimental. This is meant to make Grafana Mimir’s configuration more approachable for new users. In a default installation, you can focus exclusively on basic configuration. As you become more familiar with Grafana Mimir and want to push your clusters further, you can choose to tune advanced parameters or use experimental parameters. Refer to [parameter categories]({{< relref "../operators-guide/configuring/reference-configuration-parameters/index.md#parameter-categories" >}}) to learn more. 
diff --git a/operations/mimir-mixin-tools/screenshots/run.sh b/operations/mimir-mixin-tools/screenshots/run.sh index e2808637dc..0a19be369b 100755 --- a/operations/mimir-mixin-tools/screenshots/run.sh +++ b/operations/mimir-mixin-tools/screenshots/run.sh @@ -67,7 +67,7 @@ docker run \ --env "MIMIR_NAMESPACE=${MIMIR_NAMESPACE}" \ --env "ALERTMANAGER_NAMESPACE=${ALERTMANAGER_NAMESPACE}" \ -v "${SCRIPT_DIR}/../../mimir-mixin-compiled/dashboards:/input" \ - -v "${SCRIPT_DIR}/../../../docs/sources/operators-guide/visualizing-metrics/dashboards:/output" \ + -v "${SCRIPT_DIR}/../../../docs/sources/operators-guide/monitoring-grafana-mimir/dashboards:/output" \ -v "${SCRIPT_DIR}:/sources" \ --entrypoint "" \ "${DOCKER_APP_IMAGE}" \ diff --git a/operations/mimir-mixin/config.libsonnet b/operations/mimir-mixin/config.libsonnet index 11e28d6a0a..adbeafc3b4 100644 --- a/operations/mimir-mixin/config.libsonnet +++ b/operations/mimir-mixin/config.libsonnet @@ -19,7 +19,7 @@ // These are used by the dashboards and allow for the simultaneous display of // microservice and single binary Mimir clusters. // Whenever you do any change here, please reflect it in the doc at: - // docs/sources/operators-guide/visualizing-metrics/requirements.md + // docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md job_names: { ingester: '(ingester.*|cortex|mimir)', // Match also custom and per-zone ingester deployments. distributor: '(distributor|cortex|mimir)', From 5b6e25920ef68b142dfcafd1e35783174316e0c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 21 Jun 2022 08:27:46 +0200 Subject: [PATCH 11/63] Enable consul only if memberlist is disabled, or when migration to memberlist is in progress. (#2152) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Enable consul only if memberlist is disabled, or when migration to memberlist is in progress. Signed-off-by: Peter Štibraný * CHANGELOG.md Signed-off-by: Peter Štibraný --- CHANGELOG.md | 2 +- .../test-gossip-disable-consul-generated.yaml | 1426 ----------------- .../test-gossip-disable-consul.jsonnet | 24 - .../mimir-tests/test-gossip-generated.yaml | 406 ----- .../test-gossip-multi-zone-generated.yaml | 406 ----- ...est-gossip-multikv-teardown-generated.yaml | 406 ----- .../test-gossip-ruler-disabled-generated.yaml | 406 ----- operations/mimir/consul.libsonnet | 2 +- 8 files changed, 2 insertions(+), 3076 deletions(-) delete mode 100644 operations/mimir-tests/test-gossip-disable-consul-generated.yaml delete mode 100644 operations/mimir-tests/test-gossip-disable-consul.jsonnet diff --git a/CHANGELOG.md b/CHANGELOG.md index ea5f958f46..31e4dbe998 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -77,7 +77,7 @@ * `autoscaling_prometheus_url`: Prometheus base URL from which to scrape Mimir metrics (e.g. `http://prometheus.default:9090/prometheus`). * [FEATURE] Jsonnet: Add support for ruler remote evaluation mode (`ruler_remote_evaluation_enabled`), which deploys and uses a dedicated query path for rule evaluation. This enables the benefits of the query-frontend for rule evaluation, such as query sharding. #2073 * [ENHANCEMENT] Added `compactor` service, that can be used to route requests directly to compactor (e.g. admin UI). #2063 -* [ENHANCEMENT] Added a `consul_enabled` configuration option that defaults to true (matching previous behavior) to provide the ability to disable consul. 
#2093 +* [ENHANCEMENT] Added a `consul_enabled` configuration option to provide the ability to disable consul. It is automatically set to false when `memberlist_ring_enabled` is true and `multikv_migration_enabled` (used for migration from Consul to memberlist) is not set. #2093 #2152 ### Mimirtool diff --git a/operations/mimir-tests/test-gossip-disable-consul-generated.yaml b/operations/mimir-tests/test-gossip-disable-consul-generated.yaml deleted file mode 100644 index 6ea4883c89..0000000000 --- a/operations/mimir-tests/test-gossip-disable-consul-generated.yaml +++ /dev/null @@ -1,1426 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: default ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: alertmanager-pdb - name: alertmanager-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: alertmanager ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: ingester-pdb - name: ingester-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: ingester ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: store-gateway-pdb - name: store-gateway-pdb - namespace: default -spec: - maxUnavailable: 2 - selector: - matchLabels: - name: store-gateway ---- -apiVersion: v1 -data: - overrides.yaml: | - overrides: {} -kind: ConfigMap -metadata: - name: overrides - namespace: default ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - clusterIP: None - ports: - - name: alertmanager-http-metrics - port: 8080 - targetPort: 8080 - - name: alertmanager-grpc - port: 9095 - targetPort: 9095 - - name: alertmanager-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: alertmanager ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - clusterIP: None - ports: - - name: compactor-http-metrics - port: 8080 - targetPort: 8080 - - name: compactor-grpc - port: 9095 - targetPort: 9095 - - name: compactor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: compactor ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: distributor - name: distributor - namespace: default -spec: - clusterIP: None - ports: - - name: distributor-http-metrics - port: 8080 - targetPort: 8080 - - name: distributor-grpc - port: 9095 - targetPort: 9095 - - name: distributor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: distributor ---- -apiVersion: v1 -kind: Service -metadata: - name: gossip-ring - namespace: default -spec: - clusterIP: None - ports: - - name: gossip-ring - port: 7946 - protocol: TCP - targetPort: 7946 - selector: - gossip_ring_member: "true" ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ingester - name: ingester - namespace: default -spec: - ports: - - name: ingester-http-metrics - port: 8080 - targetPort: 8080 - - name: ingester-grpc - port: 9095 - targetPort: 9095 - - name: ingester-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: ingester ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached - name: memcached - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-frontend - 
name: memcached-frontend - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-index-queries - name: memcached-index-queries - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-index-queries ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-metadata - name: memcached-metadata - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-metadata ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: querier - name: querier - namespace: default -spec: - ports: - - name: querier-http-metrics - port: 8080 - targetPort: 8080 - - name: querier-grpc - port: 9095 - targetPort: 9095 - - name: querier-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: querier ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend - namespace: default -spec: - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler - namespace: default -spec: - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ruler - name: ruler - namespace: default -spec: - ports: - - name: ruler-http-metrics - port: 8080 - targetPort: 8080 - - name: ruler-grpc - port: 9095 - targetPort: 9095 - selector: - name: ruler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway - name: store-gateway - namespace: default -spec: - ports: - - name: store-gateway-http-metrics - port: 8080 - targetPort: 8080 - - name: store-gateway-grpc - port: 9095 - targetPort: 9095 - - name: store-gateway-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: store-gateway ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: distributor - namespace: default -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: distributor - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: 
- gossip_ring_member: "true" - name: distributor - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: distributor - topologyKey: kubernetes.io/hostname - containers: - - args: - - -distributor.ha-tracker.enable=true - - -distributor.ha-tracker.enable-for-all-users=true - - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 - - -distributor.ha-tracker.prefix=prom_ha/ - - -distributor.ha-tracker.store=etcd - - -distributor.health-check-ingesters=true - - -distributor.ingestion-burst-size=200000 - - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.prefix= - - -distributor.ring.store=memberlist - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.max-connection-age=2m - - -server.grpc.keepalive.max-connection-age-grace=5m - - -server.grpc.keepalive.max-connection-idle=1m - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=distributor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: distributor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 4Gi - requests: - cpu: "2" - memory: 2Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: querier - namespace: default -spec: - minReadySeconds: 10 - replicas: 6 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: querier - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: querier - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: querier - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -querier.frontend-client.grpc-max-send-msg-size=104857600 - - -querier.max-concurrent=8 - - 
-querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store.max-query-length=768h - - -target=querier - env: - - name: JAEGER_REPORTER_MAX_QUEUE_SIZE - value: "1024" - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: querier - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 24Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-frontend - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-frontend - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-frontend.align-querier-with-step=false - - -query-frontend.cache-results=true - - -query-frontend.max-cache-freshness=10m - - -query-frontend.results-cache.backend=memcached - - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 - - -query-frontend.results-cache.memcached.timeout=500ms - - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store.max-query-length=12000h - - -target=query-frontend - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-frontend - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 1200Mi - requests: - cpu: "2" - memory: 600Mi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-scheduler - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-scheduler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-scheduler - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-scheduler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-scheduler.max-outstanding-requests-per-tenant=100 - - -server.grpc.keepalive.min-time-between-pings=10s - - 
-server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=query-scheduler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-scheduler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 2Gi - requests: - cpu: "2" - memory: 1Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ruler - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: ruler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: ruler - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: ruler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -ruler-storage.backend=gcs - - -ruler-storage.gcs.bucket-name=rules-bucket - - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - - -ruler.max-rule-groups-per-tenant=35 - - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.store=memberlist - - -ruler.rule-path=/rules - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store.max-query-length=768h - - -target=ruler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ruler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - cpu: "16" - memory: 16Gi - requests: - cpu: "1" - memory: 6Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - terminationGracePeriodSeconds: 600 - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: alertmanager - serviceName: alertmanager - template: - metadata: - labels: - gossip_ring_member: "true" - name: alertmanager - spec: - containers: - 
- args: - - -alertmanager-storage.backend=gcs - - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=memberlist - - -alertmanager.storage.path=/data - - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=alertmanager - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: alertmanager - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 15Gi - requests: - cpu: "2" - memory: 10Gi - volumeMounts: - - mountPath: /data - name: alertmanager-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: alertmanager-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: compactor - serviceName: compactor - template: - metadata: - labels: - gossip_ring_member: "true" - name: compactor - spec: - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -compactor.block-ranges=2h,12h,24h - - -compactor.blocks-retention-period=0 - - -compactor.cleanup-interval=15m - - -compactor.compaction-concurrency=1 - - -compactor.compaction-interval=30m - - -compactor.compactor-tenant-shard-size=1 - - -compactor.data-dir=/data - - -compactor.deletion-delay=2h - - -compactor.max-closing-blocks-concurrency=2 - - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.prefix= - - -compactor.ring.store=memberlist - - -compactor.ring.wait-stability-min-duration=1m - - -compactor.split-and-merge-shards=0 - - -compactor.split-groups=1 - - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=compactor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: compactor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 6Gi - requests: - cpu: 1 - memory: 6Gi - volumeMounts: - - mountPath: /data - name: compactor-data - - mountPath: /etc/mimir - name: overrides - securityContext: - 
runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: compactor-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 250Gi - storageClassName: standard ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: ingester - name: ingester - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 3 - selector: - matchLabels: - name: ingester - serviceName: ingester - template: - metadata: - labels: - gossip_ring_member: "true" - name: ingester - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: ingester - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -blocks-storage.tsdb.block-ranges-period=2h - - -blocks-storage.tsdb.dir=/data/tsdb - - -blocks-storage.tsdb.ship-interval=1m - - -distributor.health-check-ingesters=true - - -ingester.max-global-series-per-metric=20000 - - -ingester.max-global-series-per-user=150000 - - -ingester.ring.heartbeat-period=15s - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.num-tokens=512 - - -ingester.ring.prefix= - - -ingester.ring.readiness-check-ring-health=false - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.tokens-file-path=/data/tokens - - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc-max-concurrent-streams=10000 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=ingester - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ingester - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 25Gi - requests: - cpu: "4" - memory: 15Gi - volumeMounts: - - mountPath: /data - name: ingester-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 1200 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ingester-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi - storageClassName: fast ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached - serviceName: memcached - template: - metadata: - labels: - name: memcached - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 6144 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 9Gi 
- requests: - cpu: 500m - memory: 6552Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-frontend - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-frontend - serviceName: memcached-frontend - template: - metadata: - labels: - name: memcached-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 1024 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-index-queries - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-index-queries - serviceName: memcached-index-queries - template: - metadata: - labels: - name: memcached-index-queries - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-index-queries - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-metadata - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - name: memcached-metadata - serviceName: memcached-metadata - template: - metadata: - labels: - name: memcached-metadata - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-metadata - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 512 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 768Mi - requests: - cpu: 500m - memory: 715Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: store-gateway - name: store-gateway - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 3 - 
selector: - matchLabels: - name: store-gateway - serviceName: store-gateway - template: - metadata: - labels: - gossip_ring_member: "true" - name: store-gateway - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: store-gateway - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.chunks-cache.backend=memcached - - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms - - -blocks-storage.bucket-store.index-cache.backend=memcached - - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 - - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true - - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m - - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - - -store-gateway.sharding-ring.wait-stability-min-duration=1m - - -target=store-gateway - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: store-gateway - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 18Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /data - name: store-gateway-data - - mountPath: /etc/mimir - name: overrides - securityContext: - 
runAsUser: 0 - terminationGracePeriodSeconds: 120 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: store-gateway-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi - storageClassName: standard ---- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - annotations: - etcd.database.coreos.com/scope: clusterwide - name: etcd - namespace: default -spec: - pod: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - etcd_cluster: etcd - topologyKey: kubernetes.io/hostname - annotations: - prometheus.io/port: "2379" - prometheus.io/scrape: "true" - etcdEnv: - - name: ETCD_AUTO_COMPACTION_RETENTION - value: 1h - labels: - name: etcd - resources: - limits: - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - size: 3 - version: 3.3.13 diff --git a/operations/mimir-tests/test-gossip-disable-consul.jsonnet b/operations/mimir-tests/test-gossip-disable-consul.jsonnet deleted file mode 100644 index 2cd47441a7..0000000000 --- a/operations/mimir-tests/test-gossip-disable-consul.jsonnet +++ /dev/null @@ -1,24 +0,0 @@ -local mimir = import 'mimir/mimir.libsonnet'; - -mimir { - _config+:: { - namespace: 'default', - external_url: 'http://test', - - consul_enabled: false, - memberlist_ring_enabled: true, - - blocks_storage_backend: 'gcs', - blocks_storage_bucket_name: 'blocks-bucket', - bucket_index_enabled: true, - query_scheduler_enabled: true, - - ruler_enabled: true, - ruler_client_type: 'gcs', - ruler_storage_bucket_name: 'rules-bucket', - - alertmanager_enabled: true, - alertmanager_client_type: 'gcs', - alertmanager_gcs_bucket_name: 'alerts-bucket', - }, -} diff --git a/operations/mimir-tests/test-gossip-generated.yaml b/operations/mimir-tests/test-gossip-generated.yaml index a5e960c54c..6ea4883c89 100644 --- a/operations/mimir-tests/test-gossip-generated.yaml +++ b/operations/mimir-tests/test-gossip-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: 
consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: 
$1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -348,36 +97,6 @@ spec: --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -648,131 +367,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - 
containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default diff --git a/operations/mimir-tests/test-gossip-multi-zone-generated.yaml b/operations/mimir-tests/test-gossip-multi-zone-generated.yaml index 53803f73a2..90b24fa612 100644 --- a/operations/mimir-tests/test-gossip-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-gossip-multi-zone-generated.yaml @@ -44,230 +44,11 @@ spec: --- apiVersion: v1 kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -kind: ServiceAccount metadata: name: rollout-operator namespace: default --- apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - 
match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request 
- labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -278,24 +59,6 @@ metadata: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role metadata: name: rollout-operator-role namespace: default @@ -326,20 +89,6 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding metadata: name: rollout-operator-rolebinding namespace: default @@ -398,36 +147,6 @@ spec: --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -810,131 +529,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - 
imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default diff --git a/operations/mimir-tests/test-gossip-multikv-teardown-generated.yaml b/operations/mimir-tests/test-gossip-multikv-teardown-generated.yaml index 1194a53666..a08e590551 100644 --- a/operations/mimir-tests/test-gossip-multikv-teardown-generated.yaml +++ b/operations/mimir-tests/test-gossip-multikv-teardown-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - 
labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: 
consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | multi_kv_config: @@ -273,38 +54,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -351,36 +100,6 @@ spec: --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -651,131 +370,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - 
containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default diff --git a/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml b/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml index f71ceab511..71782ca8bb 100644 --- a/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml +++ b/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: 
consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: 
consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -348,36 +97,6 @@ spec: --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -630,131 +349,6 @@ spec: --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - 
memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default diff --git a/operations/mimir/consul.libsonnet b/operations/mimir/consul.libsonnet index e5c23a3dd9..2ec984103b 100644 --- a/operations/mimir/consul.libsonnet +++ b/operations/mimir/consul.libsonnet @@ -2,7 +2,7 @@ local consul = import 'consul/consul.libsonnet'; { _config+:: { - consul_enabled: true, + consul_enabled: if $._config.memberlist_ring_enabled && !$._config.multikv_migration_enabled then false else true, consul_replicas: 1, other_namespaces+: [], }, From 41e4d0d263157eaf4f2a8b760a35bb6b8cdde557 Mon Sep 17 00:00:00 2001 From: Javad Hajiani Date: Tue, 21 Jun 2022 08:37:02 +0200 Subject: [PATCH 12/63] Add support for authenticating by bearer or JWT token to mimirtool (#2146) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add Auth token support to mimirtool rules/alert/alertmanager commands * Update CHANGELOG.md Co-authored-by: Nick Pillitteri <56quarters@users.noreply.github.com> * Update pkg/mimirtool/commands/alerts.go Co-authored-by: Peter Štibraný * make auth-token environment variable configurable * Updated error message to be more meaningful * Moved if condition to upper block for having better readability * Moved all if blocks to single switch block * Add docs for MIMIR_AUTH_TOKEN Co-authored-by: Nick Pillitteri <56quarters@users.noreply.github.com> Co-authored-by: Peter Štibraný --- CHANGELOG.md | 1 + operations/mimir-rules-action/README.md | 1 + pkg/mimirtool/client/client.go | 47 +++++++++++++++++-------- pkg/mimirtool/commands/alerts.go | 3 +- pkg/mimirtool/commands/env_var.go | 3 ++ pkg/mimirtool/commands/rules.go | 1 + 6 files changed, 40 insertions(+), 16 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 31e4dbe998..b3a3ff7576 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -81,6 +81,7 @@ ### Mimirtool +* [FEATURE] Added bearer token support for when Mimir is behind a gateway authenticating by bearer token. #2146 * [BUGFIX] mimirtool analyze: Fix dashboard JSON unmarshalling errors (#1840). 
#1973 ### Mimir Continuous Test diff --git a/operations/mimir-rules-action/README.md b/operations/mimir-rules-action/README.md index d16123f776..5249ccff9a 100644 --- a/operations/mimir-rules-action/README.md +++ b/operations/mimir-rules-action/README.md @@ -11,6 +11,7 @@ This action is configured using environment variables defined in the workflow. T | `MIMIR_ADDRESS` | URL address for the target Mimir cluster | `false` | N/A | | `MIMIR_TENANT_ID` | ID for the desired tenant in the target Mimir cluster. Used as the username under HTTP Basic authentication. | `false` | N/A | | `MIMIR_API_KEY` | Optional password that is required for password-protected Mimir clusters. An encrypted [github secret](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/creating-and-using-encrypted-secrets) is recommended. Used as the password under HTTP Basic authentication. | `false` | N/A | +| `MIMIR_AUTH_TOKEN` | Optional bearer or JWT token that is required for Mimir clusters authenticating by bearer or JWT token. An encrypted [github secret](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/creating-and-using-encrypted-secrets) is recommended. | `false` | N/A | | `ACTION` | Which action to take. One of `lint`, `prepare`, `check`, `diff` or `sync` | `true` | N/A | | `RULES_DIR` | Comma-separated list of directories to walk in order to source rules files | `false` | `./` | | `LABEL_EXCLUDED_RULE_GROUPS` | Comma separated list of rule group names to exclude when including the configured label to aggregations. This option is supported only by the `prepare` action. | `false` | N/A | diff --git a/pkg/mimirtool/client/client.go b/pkg/mimirtool/client/client.go index 6411ee9198..f886890cc6 100644 --- a/pkg/mimirtool/client/client.go +++ b/pkg/mimirtool/client/client.go @@ -38,17 +38,19 @@ type Config struct { Address string `yaml:"address"` ID string `yaml:"id"` TLS tls.ClientConfig - UseLegacyRoutes bool `yaml:"use_legacy_routes"` + UseLegacyRoutes bool `yaml:"use_legacy_routes"` + AuthToken string `yaml:"auth_token"` } // MimirClient is used to get and load rules into a Mimir ruler. type MimirClient struct { - user string - key string - id string - endpoint *url.URL - Client http.Client - apiPath string + user string + key string + id string + endpoint *url.URL + Client http.Client + apiPath string + authToken string } // New returns a new MimirClient. 
@@ -90,12 +92,13 @@ func New(cfg Config) (*MimirClient, error) { } return &MimirClient{ - user: cfg.User, - key: cfg.Key, - id: cfg.ID, - endpoint: endpoint, - Client: client, - apiPath: path, + user: cfg.User, + key: cfg.Key, + id: cfg.ID, + endpoint: endpoint, + Client: client, + apiPath: path, + authToken: cfg.AuthToken, }, nil } @@ -119,10 +122,24 @@ func (r *MimirClient) doRequest(path, method string, payload []byte) (*http.Resp return nil, err } - if r.user != "" { + switch { + case (r.user != "" || r.key != "") && r.authToken != "": + err := errors.New("at most one of basic auth or auth token should be configured") + log.WithFields(log.Fields{ + "url": req.URL.String(), + "method": req.Method, + "error": err, + }).Errorln("error during setting up request to mimir api") + return nil, err + + case r.user != "": req.SetBasicAuth(r.user, r.key) - } else if r.key != "" { + + case r.key != "": req.SetBasicAuth(r.id, r.key) + + case r.authToken != "": + req.Header.Add("Authorization", "Bearer "+r.authToken) } req.Header.Add("X-Scope-OrgID", r.id) diff --git a/pkg/mimirtool/commands/alerts.go b/pkg/mimirtool/commands/alerts.go index bf1c2b4a33..aec2c9e21b 100644 --- a/pkg/mimirtool/commands/alerts.go +++ b/pkg/mimirtool/commands/alerts.go @@ -72,7 +72,7 @@ func (a *AlertmanagerCommand) Register(app *kingpin.Application, envVars EnvVarN alertCmd.Flag("tls-ca-path", "TLS CA certificate to verify Grafana Mimir API as part of mTLS; alternatively, set "+envVars.TLSCAPath+".").Default("").Envar(envVars.TLSCAPath).StringVar(&a.ClientConfig.TLS.CAPath) alertCmd.Flag("tls-cert-path", "TLS client certificate to authenticate with the Grafana Mimir API as part of mTLS; alternatively, set "+envVars.TLSCertPath+".").Default("").Envar(envVars.TLSCertPath).StringVar(&a.ClientConfig.TLS.CertPath) alertCmd.Flag("tls-key-path", "TLS client certificate private key to authenticate with the Grafana Mimir API as part of mTLS; alternatively, set "+envVars.TLSKeyPath+".").Default("").Envar(envVars.TLSKeyPath).StringVar(&a.ClientConfig.TLS.KeyPath) - + alertCmd.Flag("auth-token", "Authentication token bearer authentication; alternatively, set "+envVars.AuthToken+".").Default("").Envar(envVars.AuthToken).StringVar(&a.ClientConfig.AuthToken) // Get Alertmanager Configs Command getAlertsCmd := alertCmd.Command("get", "Get the Alertmanager configuration that is currently in the Grafana Mimir Alertmanager.").Action(a.getConfig) getAlertsCmd.Flag("disable-color", "disable colored output").BoolVar(&a.DisableColor) @@ -147,6 +147,7 @@ func (a *AlertCommand) Register(app *kingpin.Application, envVars EnvVarNames) { alertCmd.Flag("id", "Mimir tenant id, alternatively set "+envVars.TenantID+".").Envar(envVars.TenantID).Required().StringVar(&a.ClientConfig.ID) alertCmd.Flag("user", fmt.Sprintf("API user to use when contacting Grafana Mimir, alternatively set %s. 
If empty, %s will be used instead.", envVars.APIUser, envVars.TenantID)).Default("").Envar(envVars.APIUser).StringVar(&a.ClientConfig.User) alertCmd.Flag("key", "API key to use when contacting Grafana Mimir; alternatively, set "+envVars.APIKey+".").Default("").Envar(envVars.APIKey).StringVar(&a.ClientConfig.Key) + alertCmd.Flag("auth-token", "Authentication token for bearer token or JWT auth, alternatively set "+envVars.AuthToken+".").Default("").Envar(envVars.AuthToken).StringVar(&a.ClientConfig.AuthToken) verifyAlertsCmd := alertCmd.Command("verify", "Verifies whether or not alerts in an Alertmanager cluster are deduplicated; useful for verifying correct configuration when transferring from Prometheus to Grafana Mimir alert evaluation.").Action(a.verifyConfig) verifyAlertsCmd.Flag("ignore-alerts", "A comma separated list of Alert names to ignore in deduplication checks.").StringVar(&a.IgnoreString) diff --git a/pkg/mimirtool/commands/env_var.go b/pkg/mimirtool/commands/env_var.go index 36b0fc4174..e3d3de32e7 100644 --- a/pkg/mimirtool/commands/env_var.go +++ b/pkg/mimirtool/commands/env_var.go @@ -11,6 +11,7 @@ type EnvVarNames struct { TLSKeyPath string TenantID string UseLegacyRoutes string + AuthToken string } func NewEnvVarsWithPrefix(prefix string) EnvVarNames { @@ -23,6 +24,7 @@ func NewEnvVarsWithPrefix(prefix string) EnvVarNames { tlsCertPath = "TLS_CERT_PATH" tlsKeyPath = "TLS_KEY_PATH" useLegacyRoutes = "USE_LEGACY_ROUTES" + authToken = "AUTH_TOKEN" ) if len(prefix) > 0 && prefix[len(prefix)-1] != '_' { @@ -38,5 +40,6 @@ func NewEnvVarsWithPrefix(prefix string) EnvVarNames { TLSKeyPath: prefix + tlsKeyPath, TenantID: prefix + tenantID, UseLegacyRoutes: prefix + useLegacyRoutes, + AuthToken: prefix + authToken, } } diff --git a/pkg/mimirtool/commands/rules.go b/pkg/mimirtool/commands/rules.go index e999fdd30e..e51516ae5d 100644 --- a/pkg/mimirtool/commands/rules.go +++ b/pkg/mimirtool/commands/rules.go @@ -97,6 +97,7 @@ func (r *RuleCommand) Register(app *kingpin.Application, envVars EnvVarNames) { rulesCmd.Flag("user", fmt.Sprintf("API user to use when contacting Grafana Mimir; alternatively, set %s. If empty, %s is used instead.", envVars.APIUser, envVars.TenantID)).Default("").Envar(envVars.APIUser).StringVar(&r.ClientConfig.User) rulesCmd.Flag("key", "API key to use when contacting Grafana Mimir; alternatively, set "+envVars.APIKey+".").Default("").Envar(envVars.APIKey).StringVar(&r.ClientConfig.Key) rulesCmd.Flag("backend", "Backend type to interact with (deprecated)").Default(rules.MimirBackend).EnumVar(&r.Backend, backends...) + rulesCmd.Flag("auth-token", "Authentication token for bearer token or JWT auth, alternatively set "+envVars.AuthToken+".").Default("").Envar(envVars.AuthToken).StringVar(&r.ClientConfig.AuthToken) // Register rule commands listCmd := rulesCmd. From 90ceaa56f1cca1a178fb6387d742fbc5e7ef8e4d Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 21 Jun 2022 10:02:27 +0200 Subject: [PATCH 13/63] Helm: customizable ServiceMonitor cluster label (#2125) * Helm: customizable ServiceMonitor cluster label Allow disabling and overwriting the cluster label in metrics scraped with the service monitor. 
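For context on the mimirtool bearer/JWT token support added above, the following is a minimal, self-contained sketch of the header-selection rule that the new doRequest switch enforces. The endpoint URL, tenant ID, and token value are placeholders, and applyAuth is an illustrative helper rather than part of mimirtool itself.

package main

import (
	"errors"
	"fmt"
	"net/http"
)

// applyAuth mirrors the precedence introduced in MimirClient.doRequest:
// basic auth and a bearer token are mutually exclusive, a key without a
// user falls back to using the tenant ID as the username, and a token is
// sent as an "Authorization: Bearer ..." header.
func applyAuth(req *http.Request, user, key, id, authToken string) error {
	switch {
	case (user != "" || key != "") && authToken != "":
		return errors.New("at most one of basic auth or auth token should be configured")
	case user != "":
		req.SetBasicAuth(user, key)
	case key != "":
		req.SetBasicAuth(id, key)
	case authToken != "":
		req.Header.Add("Authorization", "Bearer "+authToken)
	}
	// The tenant header is always set, matching the existing client behaviour.
	req.Header.Add("X-Scope-OrgID", id)
	return nil
}

func main() {
	// Placeholder endpoint and token, e.g. values that would come from
	// MIMIR_ADDRESS and MIMIR_AUTH_TOKEN.
	req, err := http.NewRequest(http.MethodGet, "http://mimir.example.com/", nil)
	if err != nil {
		panic(err)
	}
	if err := applyAuth(req, "", "", "tenant-1", "example-jwt-token"); err != nil {
		panic(err)
	}
	fmt.Println(req.Header.Get("Authorization"), req.Header.Get("X-Scope-OrgID"))
}
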
--- .../charts/mimir-distributed/CHANGELOG.md | 1 + .../mimir-distributed/scripts/create-servmon | 139 ------------------ .../admin-api/admin-api-servmon.yaml | 53 +------ .../alertmanager/alertmanager-servmon.yaml | 58 +------- .../compactor/compactor-servmon.yaml | 53 +------ .../distributor/distributor-servmon.yaml | 53 +------ .../templates/gateway/gateway-servmon.yaml | 53 +------ .../templates/ingester/ingester-servmon.yaml | 58 +------- .../templates/lib/service-monitor.tpl | 68 +++++++++ .../templates/querier/querier-servmon.yaml | 53 +------ .../query-frontend-servmon.yaml | 58 +------- .../templates/ruler/ruler-servmon.yaml | 53 +------ .../store-gateway/store-gateway-servmon.yaml | 58 +------- .../helm/charts/mimir-distributed/values.yaml | 4 + 14 files changed, 83 insertions(+), 679 deletions(-) delete mode 100755 operations/helm/charts/mimir-distributed/scripts/create-servmon create mode 100644 operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index b60fdb3327..15009a305b 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -16,6 +16,7 @@ Entries should include a reference to the Pull Request that introduced the chang * [CHANGE] Enable multi-tenancy by default. This means `multitenancy_enabled` is now `true` for both Mimir and Enterprise Metrics. Nginx will inject `X-Scope-OrgID=anonymous` header if the header is not present, ensuring backwards compatibility. #2117 * [CHANGE] Change default value for `blocks_storage.bucket_store.chunks_cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 * [FEATURE] Add `mimir-continuous-test` in smoke-test mode. Use `helm test` to run a smoke test of the read + write path. +* [ENHANCEMENT] The new value `serviceMonitor.clusterLabel` controls whether to add a `cluster` label and with what content to ServiceMonitor metrics. #2125 * [ENHANCEMENT] Set the flag `ingester.ring.instance-availability-zone` to `zone-default` for ingesters. This is the first step of introducing multi-zone ingesters. #2114 * [ENHANCEMENT] Add `mimir.structuredConfig` for adding and modifing `mimir.config` values after template evaulation. It can be used to alter individual values in the configuration and it's structured YAML instead of text. #2100 * [ENHANCEMENT] Add `global.podAnnotations` which can add POD annotations to PODs directly controlled by this chart (mimir services, nginx). 
#2099 diff --git a/operations/helm/charts/mimir-distributed/scripts/create-servmon b/operations/helm/charts/mimir-distributed/scripts/create-servmon deleted file mode 100755 index 16443be09d..0000000000 --- a/operations/helm/charts/mimir-distributed/scripts/create-servmon +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env bash - -set -euf -o pipefail - -function usage { - cat < - -Options: - -g component is member of memberlist - -e is enterprise feature - -t toggleable with "enabled" - -ne opensource only feature - -Examples: - $0 overrides-exporter -EOF -} - -if [[ $# -eq 0 ]]; then - usage - exit 1 -fi - -component="$1" -enterprise=false -non_enterprise=false -toggle=false -memberlist='' - -while [[ $# -gt 0 ]] ; do -case "$1" in - -g) - memberlist=' "memberlist" true' - ;; - -e) - enterprise=true - ;; - -ne) - non_enterprise=true - ;; - -t) - toggle=true - ;; - -h) usage && exit 0 ;; - *) - component="$1" ;; -esac -shift -done - -# Convert kebab-case to snake_case. -function snake_case { - sed -E -e 's/-/_/' <<<"$1" -} - -snake_cased="$(snake_case "${component}")" - -if [ "${enterprise}" = "true" ] ; then -echo "{{- if .Values.enterprise.enabled -}}" -fi - -if [ "${non_enterprise}" = "true" ] ; then -echo "{{- if not .Values.enterprise.enabled -}}" -fi - -if [ "${toggle}" = "true" ] ; then -echo "{{- if .Values.${snake_cased}.enabled -}}" -fi - -cat < Date: Tue, 21 Jun 2022 11:07:04 +0200 Subject: [PATCH 14/63] Docs: add mimir recommended scrape interval (#2147) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * docs: add mimir recommended scrape interval * Update docs/sources/operators-guide/visualizing-metrics/requirements.md Co-authored-by: Peter Štibraný * Update docs/sources/operators-guide/visualizing-metrics/requirements.md * feat(changelog): mimir required scrape interval * Fix changelog entries ordering Co-authored-by: Ursula Kallio Co-authored-by: Diego ROJAS <63368264+rojasdiegopro@users.noreply.github.com> --- CHANGELOG.md | 1 + .../operators-guide/monitoring-grafana-mimir/requirements.md | 2 ++ 2 files changed, 3 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index b3a3ff7576..95ea7ea8aa 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -96,6 +96,7 @@ * [ENHANCEMENT] Explain the runtime override of active series matchers. #1868 * [ENHANCEMENT] Clarify "Set rule group" API specification. #1869 * [ENHANCEMENT] Published Mimir jsonnet documentation. #2024 +* [ENHANCEMENT] Documented required scrape interval for using alerting and recording rules from Mimir jsonnet. #2147 * [BUGFIX] Fixed ruler configuration used in the getting started guide. #2052 * [BUGFIX] Fixed Mimir Alertmanager datasource in Grafana used by "Play with Grafana Mimir" tutorial. #2115 diff --git a/docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md b/docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md index eebf614874..44aba2bb22 100644 --- a/docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/requirements.md @@ -21,6 +21,8 @@ The following table shows the required label names and whether they can be custo | `pod` | Yes | The unique identifier of a Mimir replica (eg. Pod ID when running on Kubernetes). The label name can be configured with the `per_instance_label` field in the mixin config. | | `instance` | Yes | The unique identifier of the node or machine where the Mimir replica is running (eg. the node when running on Kubernetes). 
The label name can be configured with the `per_node_label` field in the mixin config. | +For rules and alerts to function properly, you must configure your Prometheus or Grafana Agent to scrape metrics from Grafana Mimir at an interval of `15s` or shorter. + ## Job selection A metric could be exposed by multiple Grafana Mimir components, or even different applications running in the same namespace. From d58da4ab1b5fd7ad62e7a0bbba7a0c25f6256199 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Tue, 21 Jun 2022 11:31:07 +0200 Subject: [PATCH 15/63] Ruler: Report wider range of errors from remote rule evaluation queries. (#2143) The `cortex_ruler_queries_failed_total` metric should increment whenever a rule evaluation query fails. Before this change, errors would be ignored unless they are specifically of type`httpgrpc`. This means that many errors get lost, for example, connectivity errors coming from gRPC. --- CHANGELOG.md | 2 +- pkg/ruler/compat.go | 4 ++-- pkg/ruler/compat_test.go | 33 ++++++++++++++++++++++++--------- 3 files changed, 27 insertions(+), 12 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 95ea7ea8aa..4432955817 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -38,7 +38,7 @@ * [BUGFIX] Ring: fix bug where instances may appear unhealthy in the hash ring web UI even though they are not. #1933 * [BUGFIX] API: gzip is now enforced when identity encoding is explicitly rejected. #1864 * [BUGFIX] Fix panic at startup when Mimir is running in monolithic mode and query sharding is enabled. #2036 -* [BUGFIX] Ruler: report failed evaluation metric for any 5xx status code returned by the query-frontend when remote operational mode is enabled. #2053 +* [BUGFIX] Ruler: report `cortex_ruler_queries_failed_total` metric for any remote query error except 4xx when remote operational mode is enabled. #2053 #2143 * [BUGFIX] Ingester: fix slow rollout when using `-ingester.ring.unregister-on-shutdown=false` with long `-ingester.ring.heartbeat-period`. #2085 * [BUGFIX] Ruler: add timeout for remote rule evaluation queries to prevent rule group evaluations getting stuck indefinitely. The duration is configurable with (`-ruler.query-frontend.timeout` (default `2m`). #2090 * [BUGFIX] Limits: Active series custom tracker configuration has been named back from `active_series_custom_trackers_config` to `active_series_custom_trackers`. For backwards compatibility both version is going to be supported for until Mimir v2.4. When both fields are specified, `active_series_custom_trackers_config` takes precedence over `active_series_custom_trackers`. #2101 diff --git a/pkg/ruler/compat.go b/pkg/ruler/compat.go index 7776e7db63..1b4197a99f 100644 --- a/pkg/ruler/compat.go +++ b/pkg/ruler/compat.go @@ -150,9 +150,9 @@ func MetricsQueryFunc(qf rules.QueryFunc, queries, failedQueries prometheus.Coun return result, origErr } else if err != nil { - // When remote querier enabled, only consider failed queries those returning a 5xx status code. + // When remote querier enabled, consider anything an error except those with 4xx status code. 
st, ok := status.FromError(err) - if ok && st.Code()/100 == 5 { + if !(ok && st.Code()/100 == 4) { failedQueries.Inc() } } diff --git a/pkg/ruler/compat_test.go b/pkg/ruler/compat_test.go index 7fb3718c75..1d9f4cdb56 100644 --- a/pkg/ruler/compat_test.go +++ b/pkg/ruler/compat_test.go @@ -153,6 +153,7 @@ func TestPusherErrors(t *testing.T) { func TestMetricsQueryFuncErrors(t *testing.T) { for name, tc := range map[string]struct { returnedError error + expectedError error expectedQueries int expectedFailedQueries int }{ @@ -161,44 +162,58 @@ func TestMetricsQueryFuncErrors(t *testing.T) { expectedFailedQueries: 0, }, - "400 error": { + "httpgrpc 400 error": { returnedError: httpgrpc.Errorf(http.StatusBadRequest, "test error"), + expectedError: httpgrpc.Errorf(http.StatusBadRequest, "test error"), expectedQueries: 1, expectedFailedQueries: 0, // 400 errors not reported as failures. }, - "500 error": { + "httpgrpc 500 error": { returnedError: httpgrpc.Errorf(http.StatusInternalServerError, "test error"), + expectedError: httpgrpc.Errorf(http.StatusInternalServerError, "test error"), expectedQueries: 1, expectedFailedQueries: 1, // 500 errors are failures }, + "unknown but non-queryable error": { + returnedError: errors.New("test error"), + expectedError: errors.New("test error"), + expectedQueries: 1, + expectedFailedQueries: 1, // Any other error should always be reported. + }, + "promql.ErrStorage": { - returnedError: promql.ErrStorage{Err: errors.New("test error")}, + returnedError: WrapQueryableErrors(promql.ErrStorage{Err: errors.New("test error")}), + expectedError: promql.ErrStorage{Err: errors.New("test error")}, expectedQueries: 1, expectedFailedQueries: 1, }, "promql.ErrQueryCanceled": { - returnedError: promql.ErrQueryCanceled("test error"), + returnedError: WrapQueryableErrors(promql.ErrQueryCanceled("test error")), + expectedError: promql.ErrQueryCanceled("test error"), expectedQueries: 1, expectedFailedQueries: 0, // Not interesting. }, "promql.ErrQueryTimeout": { - returnedError: promql.ErrQueryTimeout("test error"), + returnedError: WrapQueryableErrors(promql.ErrQueryTimeout("test error")), + expectedError: promql.ErrQueryTimeout("test error"), expectedQueries: 1, expectedFailedQueries: 0, // Not interesting. }, "promql.ErrTooManySamples": { - returnedError: promql.ErrTooManySamples("test error"), + returnedError: WrapQueryableErrors(promql.ErrTooManySamples("test error")), + expectedError: promql.ErrTooManySamples("test error"), expectedQueries: 1, expectedFailedQueries: 0, // Not interesting. }, "unknown error": { - returnedError: errors.New("test error"), + returnedError: WrapQueryableErrors(errors.New("test error")), + expectedError: errors.New("test error"), expectedQueries: 1, expectedFailedQueries: 1, // unknown errors are not 400, so they are reported. 
}, @@ -208,12 +223,12 @@ func TestMetricsQueryFuncErrors(t *testing.T) { failures := prometheus.NewCounter(prometheus.CounterOpts{}) mockFunc := func(ctx context.Context, q string, t time.Time) (promql.Vector, error) { - return promql.Vector{}, WrapQueryableErrors(tc.returnedError) + return promql.Vector{}, tc.returnedError } qf := MetricsQueryFunc(mockFunc, queries, failures) _, err := qf(context.Background(), "test", time.Now()) - require.Equal(t, tc.returnedError, err) + require.Equal(t, tc.expectedError, err) require.Equal(t, tc.expectedQueries, int(testutil.ToFloat64(queries))) require.Equal(t, tc.expectedFailedQueries, int(testutil.ToFloat64(failures))) From 9f46b6fd25ae5f813751be2a8b71c0da31d1ed36 Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Tue, 21 Jun 2022 13:03:28 +0200 Subject: [PATCH 16/63] Fix GossipMembersMismatch misfiring with remote ruler eval (#2159) * Fix GossipMembersMismatch misfiring with remote ruler eval The alert doesn't take into account the queries that now have ruler- prepended to their name. This PR fixes it Signed-off-by: Dimitar Dimitrov * Simpler regexes * Update computed mixin --- operations/mimir-mixin-compiled/alerts.yaml | 2 +- operations/mimir-mixin/config.libsonnet | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/operations/mimir-mixin-compiled/alerts.yaml b/operations/mimir-mixin-compiled/alerts.yaml index 28381bfe5a..46b42cdfb3 100644 --- a/operations/mimir-mixin-compiled/alerts.yaml +++ b/operations/mimir-mixin-compiled/alerts.yaml @@ -352,7 +352,7 @@ groups: message: Mimir instance {{ $labels.pod }} in {{ $labels.cluster }}/{{ $labels.namespace }} sees incorrect number of gossip members. expr: | - avg by (cluster, namespace) (memberlist_client_cluster_members_count) != sum by (cluster, namespace) (up{job=~".+/(alertmanager|compactor|distributor|ingester.*|querier.*|ruler|store-gateway.*|cortex|mimir)"}) + avg by (cluster, namespace) (memberlist_client_cluster_members_count) != sum by (cluster, namespace) (up{job=~".+/(alertmanager|compactor|distributor|ingester.*|querier.*|ruler|ruler-querier.*|store-gateway.*|cortex|mimir)"}) for: 15m labels: severity: warning diff --git a/operations/mimir-mixin/config.libsonnet b/operations/mimir-mixin/config.libsonnet index adbeafc3b4..a821f6edb8 100644 --- a/operations/mimir-mixin/config.libsonnet +++ b/operations/mimir-mixin/config.libsonnet @@ -30,7 +30,7 @@ ruler_query_frontend: '(ruler-query-frontend.*)', // Match also custom ruler-query-frontend deployments. query_scheduler: 'query-scheduler.*', // Not part of single-binary. Match also custom query-scheduler deployments. ruler_query_scheduler: 'ruler-query-scheduler.*', // Not part of single-binary. Match also custom query-scheduler deployments. - ring_members: ['alertmanager', 'compactor', 'distributor', 'ingester.*', 'querier.*', 'ruler', 'store-gateway.*', 'cortex', 'mimir'], + ring_members: ['alertmanager', 'compactor', 'distributor', 'ingester.*', 'querier.*', 'ruler', 'ruler-querier.*', 'store-gateway.*', 'cortex', 'mimir'], store_gateway: '(store-gateway.*|cortex|mimir)', // Match also per-zone store-gateway deployments. gateway: '(gateway|cortex-gw|cortex-gw-internal)', compactor: 'compactor.*|cortex|mimir', // Match also custom compactor deployments. 
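As a quick, hedged check of the GossipMembersMismatch fix above: PromQL label matchers are fully anchored, so the old job regex never matched ruler-querier pods and they were excluded from the sum of `up`. The snippet below, using made-up namespace/job label values, shows only the updated matcher accepting them.

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Job regexes copied from the compiled alert, wrapped in ^...$ because
	// PromQL's =~ matcher is fully anchored.
	oldRe := regexp.MustCompile(`^.+/(alertmanager|compactor|distributor|ingester.*|querier.*|ruler|store-gateway.*|cortex|mimir)$`)
	newRe := regexp.MustCompile(`^.+/(alertmanager|compactor|distributor|ingester.*|querier.*|ruler|ruler-querier.*|store-gateway.*|cortex|mimir)$`)

	// Hypothetical job label values as they would appear on `up` series.
	for _, job := range []string{"mimir/querier", "mimir/ruler", "mimir/ruler-querier"} {
		fmt.Printf("%-22s old=%v new=%v\n", job, oldRe.MatchString(job), newRe.MatchString(job))
	}
	// Only "mimir/ruler-querier" changes: it was previously missing from the
	// up{...} sum, which made the comparison against gossip member counts misfire.
}
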
From 9c6e631093945d624b36cb8eff37b6d0e3c99433 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 21 Jun 2022 13:13:21 +0200 Subject: [PATCH 17/63] Helm: fix namespace issues (#2123) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Helm: add namespace specification to all objects * Helm: add some default values for ServiceMonitor objects Add namespace and namespaceSelector default values. Signed-off-by: György Krajcsovits Co-authored-by: Rasmus Dencker --- operations/helm/charts/mimir-distributed/CHANGELOG.md | 2 ++ .../templates/admin-api/admin-api-dep.yaml | 2 ++ .../templates/admin-api/admin-api-pdb.yaml | 1 + .../templates/admin-api/admin-api-svc.yaml | 1 + .../templates/alertmanager/alertmanager-dep.yaml | 2 ++ .../templates/alertmanager/alertmanager-pdb.yaml | 1 + .../alertmanager/alertmanager-statefulset.yaml | 2 ++ .../alertmanager/alertmanager-svc-headless.yaml | 1 + .../templates/alertmanager/alertmanager-svc.yaml | 1 + .../templates/compactor/compactor-pdb.yaml | 1 + .../templates/compactor/compactor-statefulset.yaml | 2 ++ .../templates/compactor/compactor-svc.yaml | 1 + .../templates/distributor/distributor-dep.yaml | 2 ++ .../templates/distributor/distributor-pdb.yaml | 1 + .../distributor/distributor-svc-headless.yaml | 1 + .../templates/distributor/distributor-svc.yaml | 1 + .../templates/gateway/gateway-dep.yaml | 2 ++ .../templates/gateway/gateway-ingress.yaml | 1 + .../templates/gateway/gateway-pdb.yaml | 1 + .../templates/gateway/gateway-svc.yaml | 1 + .../templates/gossip-ring/gossip-ring-svc.yaml | 1 + .../templates/ingester/ingester-pdb.yaml | 1 + .../templates/ingester/ingester-statefulset.yaml | 2 ++ .../templates/ingester/ingester-svc-headless.yaml | 1 + .../templates/ingester/ingester-svc.yaml | 1 + .../templates/lib/service-monitor.tpl | 11 ++++++----- .../mimir-distributed/templates/license-secret.yaml | 1 + .../mimir-distributed/templates/mimir-config.yaml | 1 + .../mimir-distributed/templates/minio-secrets.yaml | 1 + .../mimir-distributed/templates/nginx/ingress.yaml | 1 + .../templates/nginx/nginx-configmap.yaml | 1 + .../mimir-distributed/templates/nginx/nginx-dep.yaml | 2 ++ .../mimir-distributed/templates/nginx/nginx-hpa.yaml | 1 + .../mimir-distributed/templates/nginx/nginx-pdb.yaml | 1 + .../templates/nginx/nginx-secret.yaml | 1 + .../mimir-distributed/templates/nginx/nginx-svc.yaml | 1 + .../overrides-exporter/overrides-exporter-dep.yaml | 2 ++ .../overrides-exporter/overrides-exporter-pdb.yaml | 1 + .../overrides-exporter/overrides-exporter-svc.yaml | 1 + .../templates/podsecuritypolicy.yaml | 1 + .../templates/querier/querier-dep.yaml | 1 + .../templates/querier/querier-pdb.yaml | 1 + .../templates/querier/querier-svc.yaml | 1 + .../templates/query-frontend/query-frontend-dep.yaml | 2 ++ .../templates/query-frontend/query-frontend-pdb.yaml | 1 + .../query-frontend/query-frontend-svc-headless.yaml | 1 + .../templates/query-frontend/query-frontend-svc.yaml | 1 + .../helm/charts/mimir-distributed/templates/role.yaml | 1 + .../mimir-distributed/templates/rolebinding.yaml | 1 + .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 ++ .../mimir-distributed/templates/ruler/ruler-pdb.yaml | 1 + .../mimir-distributed/templates/ruler/ruler-svc.yaml | 1 + .../templates/runtime-configmap.yaml | 1 + .../mimir-distributed/templates/serviceaccount.yaml | 1 + .../templates/store-gateway/store-gateway-pdb.yaml | 1 + .../store-gateway/store-gateway-statefulset.yaml | 2 ++ 
.../store-gateway/store-gateway-svc-headless.yaml | 1 + .../templates/store-gateway/store-gateway-svc.yaml | 1 + .../templates/tokengen/tokengen-job.yaml | 2 ++ operations/helm/charts/mimir-distributed/values.yaml | 4 +++- .../templates/admin-api/admin-api-dep.yaml | 2 ++ .../templates/admin-api/admin-api-svc.yaml | 1 + .../alertmanager/alertmanager-statefulset.yaml | 2 ++ .../alertmanager/alertmanager-svc-headless.yaml | 1 + .../templates/alertmanager/alertmanager-svc.yaml | 1 + .../templates/compactor/compactor-statefulset.yaml | 2 ++ .../templates/compactor/compactor-svc.yaml | 1 + .../templates/distributor/distributor-dep.yaml | 2 ++ .../distributor/distributor-svc-headless.yaml | 1 + .../templates/distributor/distributor-svc.yaml | 1 + .../templates/gateway/gateway-dep.yaml | 2 ++ .../templates/gateway/gateway-svc.yaml | 1 + .../templates/gossip-ring/gossip-ring-svc.yaml | 1 + .../templates/ingester/ingester-pdb.yaml | 1 + .../templates/ingester/ingester-statefulset.yaml | 2 ++ .../templates/ingester/ingester-svc-headless.yaml | 1 + .../templates/ingester/ingester-svc.yaml | 1 + .../mimir-distributed/templates/license-secret.yaml | 1 + .../mimir-distributed/templates/mimir-config.yaml | 1 + .../mimir-distributed/templates/minio-secrets.yaml | 1 + .../overrides-exporter/overrides-exporter-dep.yaml | 2 ++ .../overrides-exporter/overrides-exporter-svc.yaml | 1 + .../templates/podsecuritypolicy.yaml | 1 + .../templates/querier/querier-dep.yaml | 1 + .../templates/querier/querier-svc.yaml | 1 + .../templates/query-frontend/query-frontend-dep.yaml | 2 ++ .../query-frontend/query-frontend-svc-headless.yaml | 1 + .../templates/query-frontend/query-frontend-svc.yaml | 1 + .../mimir-distributed/templates/role.yaml | 1 + .../mimir-distributed/templates/rolebinding.yaml | 1 + .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 ++ .../mimir-distributed/templates/ruler/ruler-svc.yaml | 1 + .../templates/runtime-configmap.yaml | 1 + .../mimir-distributed/templates/serviceaccount.yaml | 1 + .../templates/store-gateway/store-gateway-pdb.yaml | 1 + .../store-gateway/store-gateway-statefulset.yaml | 2 ++ .../store-gateway/store-gateway-svc-headless.yaml | 1 + .../templates/store-gateway/store-gateway-svc.yaml | 1 + .../templates/tokengen/tokengen-job.yaml | 2 ++ .../templates/admin-api/admin-api-dep.yaml | 2 ++ .../templates/admin-api/admin-api-svc.yaml | 1 + .../alertmanager/alertmanager-statefulset.yaml | 2 ++ .../alertmanager/alertmanager-svc-headless.yaml | 1 + .../templates/alertmanager/alertmanager-svc.yaml | 1 + .../templates/compactor/compactor-statefulset.yaml | 2 ++ .../templates/compactor/compactor-svc.yaml | 1 + .../templates/distributor/distributor-dep.yaml | 2 ++ .../distributor/distributor-svc-headless.yaml | 1 + .../templates/distributor/distributor-svc.yaml | 1 + .../templates/gateway/gateway-dep.yaml | 2 ++ .../templates/gateway/gateway-svc.yaml | 1 + .../templates/gossip-ring/gossip-ring-svc.yaml | 1 + .../templates/ingester/ingester-pdb.yaml | 1 + .../templates/ingester/ingester-statefulset.yaml | 2 ++ .../templates/ingester/ingester-svc-headless.yaml | 1 + .../templates/ingester/ingester-svc.yaml | 1 + .../mimir-distributed/templates/license-secret.yaml | 1 + .../mimir-distributed/templates/mimir-config.yaml | 1 + .../overrides-exporter/overrides-exporter-dep.yaml | 2 ++ .../overrides-exporter/overrides-exporter-svc.yaml | 1 + .../templates/podsecuritypolicy.yaml | 1 + .../templates/querier/querier-dep.yaml | 1 + .../templates/querier/querier-svc.yaml | 1 + 
.../templates/query-frontend/query-frontend-dep.yaml | 2 ++ .../query-frontend/query-frontend-svc-headless.yaml | 1 + .../templates/query-frontend/query-frontend-svc.yaml | 1 + .../mimir-distributed/templates/role.yaml | 1 + .../mimir-distributed/templates/rolebinding.yaml | 1 + .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 ++ .../mimir-distributed/templates/ruler/ruler-svc.yaml | 1 + .../templates/runtime-configmap.yaml | 1 + .../mimir-distributed/templates/serviceaccount.yaml | 1 + .../templates/store-gateway/store-gateway-pdb.yaml | 1 + .../store-gateway/store-gateway-statefulset.yaml | 2 ++ .../store-gateway/store-gateway-svc-headless.yaml | 1 + .../templates/store-gateway/store-gateway-svc.yaml | 1 + .../templates/tokengen/tokengen-job.yaml | 2 ++ .../alertmanager/alertmanager-statefulset.yaml | 2 ++ .../alertmanager/alertmanager-svc-headless.yaml | 1 + .../templates/alertmanager/alertmanager-svc.yaml | 1 + .../templates/compactor/compactor-statefulset.yaml | 2 ++ .../templates/compactor/compactor-svc.yaml | 1 + .../templates/distributor/distributor-dep.yaml | 2 ++ .../distributor/distributor-svc-headless.yaml | 1 + .../templates/distributor/distributor-svc.yaml | 1 + .../templates/gossip-ring/gossip-ring-svc.yaml | 1 + .../templates/ingester/ingester-pdb.yaml | 1 + .../templates/ingester/ingester-statefulset.yaml | 2 ++ .../templates/ingester/ingester-svc-headless.yaml | 1 + .../templates/ingester/ingester-svc.yaml | 1 + .../mimir-distributed/templates/mimir-config.yaml | 1 + .../mimir-distributed/templates/minio-secrets.yaml | 1 + .../templates/nginx/nginx-configmap.yaml | 1 + .../mimir-distributed/templates/nginx/nginx-dep.yaml | 2 ++ .../mimir-distributed/templates/nginx/nginx-svc.yaml | 1 + .../overrides-exporter/overrides-exporter-dep.yaml | 2 ++ .../overrides-exporter/overrides-exporter-svc.yaml | 1 + .../templates/podsecuritypolicy.yaml | 1 + .../templates/querier/querier-dep.yaml | 1 + .../templates/querier/querier-svc.yaml | 1 + .../templates/query-frontend/query-frontend-dep.yaml | 2 ++ .../query-frontend/query-frontend-svc-headless.yaml | 1 + .../templates/query-frontend/query-frontend-svc.yaml | 1 + .../mimir-distributed/templates/role.yaml | 1 + .../mimir-distributed/templates/rolebinding.yaml | 1 + .../mimir-distributed/templates/ruler/ruler-dep.yaml | 2 ++ .../mimir-distributed/templates/ruler/ruler-svc.yaml | 1 + .../templates/runtime-configmap.yaml | 1 + .../mimir-distributed/templates/serviceaccount.yaml | 1 + .../templates/store-gateway/store-gateway-pdb.yaml | 1 + .../store-gateway/store-gateway-statefulset.yaml | 2 ++ .../store-gateway/store-gateway-svc-headless.yaml | 1 + .../templates/store-gateway/store-gateway-svc.yaml | 1 + 173 files changed, 225 insertions(+), 6 deletions(-) diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index 15009a305b..9462e5b320 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -16,6 +16,8 @@ Entries should include a reference to the Pull Request that introduced the chang * [CHANGE] Enable multi-tenancy by default. This means `multitenancy_enabled` is now `true` for both Mimir and Enterprise Metrics. Nginx will inject `X-Scope-OrgID=anonymous` header if the header is not present, ensuring backwards compatibility. #2117 * [CHANGE] Change default value for `blocks_storage.bucket_store.chunks_cache.memcached.timeout` to `450ms` to increase use of cached data. 
#2035 * [FEATURE] Add `mimir-continuous-test` in smoke-test mode. Use `helm test` to run a smoke test of the read + write path. +* [ENHANCEMENT] ServiceMonitor object will now have default values based on release namesapce in the `namespace` and `namespaceSelector` fields. #2123 +* [ENHANCEMENT] Set the `namespace` metadata field for all kubernetes objects to enable using `--namespace` correctly with Helm even if the specified namespace does not exist. #2123 * [ENHANCEMENT] The new value `serviceMonitor.clusterLabel` controls whether to add a `cluster` label and with what content to ServiceMonitor metrics. #2125 * [ENHANCEMENT] Set the flag `ingester.ring.instance-availability-zone` to `zone-default` for ingesters. This is the first step of introducing multi-zone ingesters. #2114 * [ENHANCEMENT] Add `mimir.structuredConfig` for adding and modifing `mimir.config` values after template evaulation. It can be used to alter individual values in the configuration and it's structured YAML instead of text. #2100 diff --git a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml index 577c2ce033..e5571d0e1f 100644 --- a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -7,6 +7,7 @@ metadata: labels: {{- include "mimir.labels" (dict "ctx" . "component" "admin-api" "memberlist" true) | nindent 4 }} name: {{ include "mimir.resourceName" (dict "ctx" . "component" "admin-api") }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.admin_api.replicas }} selector: @@ -23,6 +24,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "admin-api") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . }} {{- if .Values.admin_api.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-pdb.yaml index 527f992b1a..40e90c6e1d 100644 --- a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-pdb.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "admin-api") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "admin-api" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-svc.yaml b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-svc.yaml index 61904bfcb4..64dfb60164 100644 --- a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-svc.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.admin_api.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml index f56fd23b7e..f48f774c28 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml @@ -8,6 +8,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "alertmanager" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.alertmanager.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.alertmanager.replicas }} selector: @@ -24,6 +25,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "alertmanager") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . }} {{- if .Values.alertmanager.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml index 51de66d84d..cb58c9b6f5 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-pdb.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "alertmanager") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "alertmanager" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 6f1d3298c1..7a879ede2c 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -8,6 +8,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "alertmanager" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.alertmanager.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.alertmanager.replicas }} selector: @@ -47,6 +48,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "alertmanager") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . 
}} {{- if .Values.alertmanager.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml index 852c4cc688..f3ced60f4e 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -12,6 +12,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.alertmanager.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml index 1b816c14b0..d77140b8e4 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.alertmanager.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-pdb.yaml index c2835a5970..c559cea26e 100644 --- a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-pdb.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "compactor") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "compactor" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml index ad47b42046..e2f7562b00 100644 --- a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -6,6 +6,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "compactor" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.compactor.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.compactor.replicas }} selector: @@ -45,6 +46,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "compactor") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . 
}} {{- if .Values.compactor.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-svc.yaml index 855e341e6c..a761a54740 100644 --- a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -9,6 +9,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.compactor.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml index 76de930e4b..74d96b1bee 100644 --- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -6,6 +6,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "distributor" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.distributor.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.distributor.replicas }} selector: @@ -22,6 +23,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "distributor") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . }} {{- if .Values.distributor.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-pdb.yaml index c9fca6342a..4e11275498 100644 --- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-pdb.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "distributor") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "distributor" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc-headless.yaml index 049f4406a7..4b9395f053 100644 --- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc-headless.yaml +++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.distributor.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc.yaml index c3a911a66d..868949de14 100644 --- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -9,6 +9,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.distributor.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml index 4ab7d94e68..342d291b78 100644 --- a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -7,6 +7,7 @@ metadata: labels: {{- include "mimir.labels" (dict "ctx" . "component" "gateway") | nindent 4 }} name: {{ include "mimir.resourceName" (dict "ctx" . "component" "gateway") }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.gateway.replicas }} selector: @@ -23,6 +24,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "gateway") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . }} {{- if .Values.gateway.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-ingress.yaml b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-ingress.yaml index 6fc634fb6c..a719bd37b0 100644 --- a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-ingress.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-ingress.yaml @@ -13,6 +13,7 @@ metadata: annotations: {{- toYaml . | nindent 4 }} {{- end }} + namespace: {{ .Release.Namespace | quote }} spec: {{- if and $ingressSupportsIngressClassName .Values.gateway.ingress.ingressClassName }} ingressClassName: {{ .Values.gateway.ingress.ingressClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-pdb.yaml index 7ae256b410..36a5494ac5 100644 --- a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-pdb.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "gateway") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "gateway") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-svc.yaml b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-svc.yaml index 4912ab2894..af51e9b401 100644 --- a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-svc.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.gateway.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/charts/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml index ff81e2ba77..fc72068a93 100644 --- a/operations/helm/charts/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -4,6 +4,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "gossip-ring") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "gossip-ring") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-pdb.yaml index e4358f086a..7c0ff5c815 100644 --- a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "ingester") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "ingester" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 029ea98775..377549e8b7 100644 --- a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -7,6 +7,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "ingester" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.ingester.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: podManagementPolicy: {{ .Values.ingester.podManagementPolicy }} replicas: {{ .Values.ingester.replicas }} @@ -47,6 +48,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "ingester") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . 
}} {{- if .Values.ingester.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc-headless.yaml index fc67ba377b..43ec71d79a 100644 --- a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc-headless.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.ingester.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc.yaml index a72aaa4600..3003f5f43c 100644 --- a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -9,6 +9,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.ingester.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl b/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl index a6be5036b6..3357d95dfa 100644 --- a/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl +++ b/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl @@ -12,9 +12,7 @@ apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: {{ include "mimir.resourceName" $ }} - {{- with .namespace }} - namespace: {{ . }} - {{- end }} + namespace: {{ .namespace | default $.ctx.Release.Namespace | quote }} labels: {{- include "mimir.labels" $ | nindent 4 }} {{- with .labels }} @@ -25,9 +23,12 @@ metadata: {{- toYaml . | nindent 4 }} {{- end }} spec: - {{- with .namespaceSelector }} namespaceSelector: - {{- toYaml . | nindent 4 }} + {{- if .namespaceSelector }} + {{- toYaml .namespaceSelector | nindent 4 }} + {{- else }} + matchNames: + - {{ $.ctx.Release.Namespace }} {{- end }} selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/license-secret.yaml b/operations/helm/charts/mimir-distributed/templates/license-secret.yaml index da73d63fb1..c992e8877f 100644 --- a/operations/helm/charts/mimir-distributed/templates/license-secret.yaml +++ b/operations/helm/charts/mimir-distributed/templates/license-secret.yaml @@ -6,6 +6,7 @@ metadata: name: {{ tpl .Values.license.secretName . }} labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} data: license.jwt: {{ .Values.license.contents | b64enc }} {{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/mimir-config.yaml b/operations/helm/charts/mimir-distributed/templates/mimir-config.yaml index 35837491f3..af72cc8316 100644 --- a/operations/helm/charts/mimir-distributed/templates/mimir-config.yaml +++ b/operations/helm/charts/mimir-distributed/templates/mimir-config.yaml @@ -9,6 +9,7 @@ metadata: name: {{ tpl .Values.externalConfigSecretName . }} labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} {{- if eq .Values.configStorageType "Secret" }} data: mimir.yaml: {{ include "mimir.calculatedConfig" . 
| b64enc }} diff --git a/operations/helm/charts/mimir-distributed/templates/minio-secrets.yaml b/operations/helm/charts/mimir-distributed/templates/minio-secrets.yaml index cdd40e8e85..f92f4768d4 100644 --- a/operations/helm/charts/mimir-distributed/templates/minio-secrets.yaml +++ b/operations/helm/charts/mimir-distributed/templates/minio-secrets.yaml @@ -5,6 +5,7 @@ metadata: name: mimir-minio-secret labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} type: Opaque data: MINIO_ACCESS_KEY_ID: {{ .Values.minio.accessKey | b64enc }} diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/ingress.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/ingress.yaml index a203c15c67..0f45dbe005 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/ingress.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/ingress.yaml @@ -14,6 +14,7 @@ metadata: annotations: {{- toYaml . | nindent 4 }} {{- end }} + namespace: {{ .Release.Namespace | quote }} spec: {{- if and $ingressSupportsIngressClassName .Values.nginx.ingress.ingressClassName }} ingressClassName: {{ .Values.nginx.ingress.ingressClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-configmap.yaml index f78228b8a8..2f4ebefa8c 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-configmap.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-configmap.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "nginx") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "nginx") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} data: nginx.conf: | {{- tpl .Values.nginx.nginxConfig.file . | nindent 4 }} diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-dep.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-dep.yaml index 4692474a46..d4cc8b5099 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-dep.yaml @@ -8,6 +8,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "nginx") | nindent 4 }} annotations: {{- toYaml .Values.nginx.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: {{- if not .Values.nginx.autoscaling.enabled }} replicas: {{ .Values.nginx.replicas }} @@ -34,6 +35,7 @@ spec: {{- with .Values.nginx.podLabels }} {{- toYaml . | nindent 8 }} {{- end }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ include "mimir.serviceAccountName" . }} {{- with .Values.imagePullSecrets }} diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-hpa.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-hpa.yaml index b7061ca4d6..92bc6ce2c9 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-hpa.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-hpa.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "nginx") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "nginx") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: scaleTargetRef: apiVersion: apps/v1 diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-pdb.yaml index 09ac6f617a..bf4e12f5d2 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-pdb.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "nginx") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "nginx") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-secret.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-secret.yaml index 1c3fe6a677..3bb1d01fa2 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-secret.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-secret.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "nginx") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "nginx") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} stringData: .htpasswd: | {{- tpl .Values.nginx.basicAuth.htpasswd $ | nindent 4 }} diff --git a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-svc.yaml b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-svc.yaml index 2df10354f8..1cad15bd8d 100644 --- a/operations/helm/charts/mimir-distributed/templates/nginx/nginx-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/nginx/nginx-svc.yaml @@ -11,6 +11,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.nginx.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: {{ .Values.nginx.service.type }} {{- with .Values.nginx.service.clusterIP }} diff --git a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 346da8196a..0b61d974b5 100644 --- a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -7,6 +7,7 @@ metadata: labels: {{- include "mimir.labels" (dict "ctx" . "component" "overrides-exporter") | nindent 4 }} name: {{ include "mimir.resourceName" (dict "ctx" . "component" "overrides-exporter") }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.overrides_exporter.replicas }} selector: @@ -23,6 +24,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "overrides-exporter") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . 
}} {{- if .Values.overrides_exporter.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml index 11dfa23b6d..a9cf4aba4b 100644 --- a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-pdb.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "overrides-exporter") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "overrides-exporter") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml index 24daeafd6d..5e42a7f3f6 100644 --- a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.overrides_exporter.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/charts/mimir-distributed/templates/podsecuritypolicy.yaml index b5a615ae7e..bd5325c61f 100644 --- a/operations/helm/charts/mimir-distributed/templates/podsecuritypolicy.yaml +++ b/operations/helm/charts/mimir-distributed/templates/podsecuritypolicy.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" .) }} labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: privileged: false allowPrivilegeEscalation: false diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml index aff0068dd5..efa435bf0f 100644 --- a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml @@ -6,6 +6,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "querier" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.querier.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.querier.replicas }} selector: diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-pdb.yaml index b5801e4c88..c82bb4c5a4 100644 --- a/operations/helm/charts/mimir-distributed/templates/querier/querier-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-pdb.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "querier") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "querier" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-svc.yaml index 685da16a80..1eb323ec5a 100644 --- a/operations/helm/charts/mimir-distributed/templates/querier/querier-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-svc.yaml @@ -9,6 +9,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.querier.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index 0b5adfc478..e033a3f91f 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -6,6 +6,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "query-frontend") | nindent 4 }} annotations: {{- toYaml .Values.query_frontend.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.query_frontend.replicas }} selector: @@ -22,6 +23,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "query-frontend") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . }} {{- if .Values.query_frontend.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml index df73277136..f87260d653 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-pdb.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "query-frontend") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "query-frontend") | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml index 85630dd3f9..595669a208 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml index 7c97d6b722..3eec5d6420 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -9,6 +9,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.query_frontend.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/role.yaml b/operations/helm/charts/mimir-distributed/templates/role.yaml index 770c4758a2..5d2c0df2ca 100644 --- a/operations/helm/charts/mimir-distributed/templates/role.yaml +++ b/operations/helm/charts/mimir-distributed/templates/role.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" .) }} labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} rules: - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] diff --git a/operations/helm/charts/mimir-distributed/templates/rolebinding.yaml b/operations/helm/charts/mimir-distributed/templates/rolebinding.yaml index 8de7dbb9d8..f3fd6232b9 100644 --- a/operations/helm/charts/mimir-distributed/templates/rolebinding.yaml +++ b/operations/helm/charts/mimir-distributed/templates/rolebinding.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" .) }} labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml index 9894810ff7..345108cb77 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -7,6 +7,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "ruler" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.ruler.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.ruler.replicas }} selector: @@ -23,6 +24,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "ruler") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . 
}} {{- if .Values.ruler.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-pdb.yaml index 7308cd8b7f..1aab4864ad 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-pdb.yaml @@ -6,6 +6,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "ruler") }} labels: {{- include "mimir.labels" (dict "ctx" . "component" "ruler" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-svc.yaml index b9abaeb4eb..477f626659 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.ruler.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/charts/mimir-distributed/templates/runtime-configmap.yaml index 6a65dcf8b0..bdca1c8671 100644 --- a/operations/helm/charts/mimir-distributed/templates/runtime-configmap.yaml +++ b/operations/helm/charts/mimir-distributed/templates/runtime-configmap.yaml @@ -4,6 +4,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "runtime") }} labels: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} data: runtime.yaml: | {{ tpl (toYaml .Values.runtimeConfig) . | nindent 4 }} diff --git a/operations/helm/charts/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/charts/mimir-distributed/templates/serviceaccount.yaml index 35f66871b3..bad2d8e0be 100644 --- a/operations/helm/charts/mimir-distributed/templates/serviceaccount.yaml +++ b/operations/helm/charts/mimir-distributed/templates/serviceaccount.yaml @@ -7,4 +7,5 @@ metadata: {{- include "mimir.labels" (dict "ctx" .) | nindent 4 }} annotations: {{- toYaml .Values.serviceAccount.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} {{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml index 0ef24228ab..0f9eee4ee6 100644 --- a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +++ b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -5,6 +5,7 @@ metadata: name: {{ include "mimir.resourceName" (dict "ctx" . "component" "store-gateway") }} labels: {{- include "mimir.labels" (dict "ctx" . 
"component" "store-gateway" "memberlist" true) | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: selector: matchLabels: diff --git a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 41d591b80f..df0148b7e6 100644 --- a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -6,6 +6,7 @@ metadata: {{- include "mimir.labels" (dict "ctx" . "component" "store-gateway" "memberlist" true) | nindent 4 }} annotations: {{- toYaml .Values.store_gateway.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: replicas: {{ .Values.store_gateway.replicas }} selector: @@ -45,6 +46,7 @@ spec: {{- end }} annotations: {{- include "mimir.podAnnotations" (dict "ctx" . "component" "store-gateway") | nindent 8 }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . }} {{- if .Values.store_gateway.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml index d79c9f940e..b985f4cad1 100644 --- a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +++ b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -10,6 +10,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml index 6d15731643..73ec5bd4e4 100644 --- a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +++ b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -9,6 +9,7 @@ metadata: {{- end }} annotations: {{- toYaml .Values.store_gateway.service.annotations | nindent 4 }} + namespace: {{ .Release.Namespace | quote }} spec: type: ClusterIP ports: diff --git a/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml index 70ed537dbc..4e01f33ee2 100644 --- a/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/charts/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -11,6 +11,7 @@ metadata: {{- toYaml .Values.tokengenJob.annotations | nindent 4 }} {{- end }} "helm.sh/hook": post-install + namespace: {{ .Release.Namespace | quote }} spec: backoffLimit: 6 completions: 1 @@ -23,6 +24,7 @@ spec: {{- with .Values.tokengenJob.podLabels }} {{- toYaml . | nindent 8 }} {{- end }} + namespace: {{ .Release.Namespace | quote }} spec: serviceAccountName: {{ template "mimir.serviceAccountName" . 
}} {{- if .Values.tokengenJob.priorityClassName }} diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index 45c74b90ec..58c7087bfe 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -269,9 +269,11 @@ serviceMonitor: # Keep empty string "" to have the default value in the 'cluster' label, which is the helm release name for Mimir and the actual cluster name for Enterprise Metrics. clusterLabel: "" # -- Alternative namespace for ServiceMonitor resources + # If left unset, the default is to install the ServiceMonitor resources in the namespace where the chart is installed, i.e. the namespace specified for the helm command. namespace: null # -- Namespace selector for ServiceMonitor resources - namespaceSelector: {} + # If left unset, the default is to select the namespace where the chart is installed, i.e. the namespace specified for the helm command. + namespaceSelector: null # -- ServiceMonitor annotations annotations: {} # -- Additional ServiceMonitor labels diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml index 21feec5668..f507cb01e2 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-enterprise-configmap-values-mimir-admin-api + namespace: "citestns" spec: replicas: 1 selector: @@ -36,6 +37,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml index 140e141bbb..7dd9fcae99 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 9f2548aff5..5873a7f93a 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/part-of: 
memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml index 7849e38448..803063e684 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml index b8cec13ee9..10a48866d7 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index 57e2992f1e..8ff3a6c137 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml index 8589e5ef56..0709f43d2a 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 79b511bba0..92b5af2dff 100644 --- 
a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -36,6 +37,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml index b329228e3b..f0ad4b85b3 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml index 6ea684ffb5..235c4242c9 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml index 45333ba0ab..b40fc6797f 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-enterprise-configmap-values-mimir-gateway + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/component: gateway annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml index e0b5eb0242..9b82a79cc0 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml @@ -12,6 +12,7 @@ metadata: 
app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml index cc32f0e2d5..f2ed746f0a 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -10,6 +10,7 @@ metadata: app.kubernetes.io/component: gossip-ring app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml index e8228dad66..10ffee4db2 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/part-of: memberlist app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: selector: matchLabels: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index be76101b94..5942739591 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: podManagementPolicy: Parallel replicas: 3 @@ -35,6 +36,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml index fbdb543dae..a6381751b0 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml index a3c2c9ef83..4a3608a25b 100644 
--- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/license-secret.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/license-secret.yaml index b887b23256..13c517ea0a 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/license-secret.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/license-secret.yaml @@ -9,5 +9,6 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: license.jwt: Tk9UQVZBTElETElDRU5TRQ== diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml index b67a50c566..839ea93b44 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: mimir.yaml: | diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/minio-secrets.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/minio-secrets.yaml index 1c41f1ecfd..6e50e37a37 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/minio-secrets.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/minio-secrets.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" type: Opaque data: MINIO_ACCESS_KEY_ID: Z3JhZmFuYS1taW1pcg== diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index c5f53ed98e..a018523f54 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-enterprise-configmap-values-mimir-overrides-exporter + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/component: overrides-exporter annotations: minio-secret-version: "42" + namespace: 
"citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml index 8f6c7ed86e..12bc42bdb3 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml index 90079c69d0..b74648d567 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: privileged: false allowPrivilegeEscalation: false diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 11251349d1..f7cd76b446 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 2 selector: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-svc.yaml index 4123ecc83c..c5505ab31a 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index cc09f7ae2b..fc3647b4ef 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -12,6 +12,7 @@ 
metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/component: query-frontend annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml index a696f539c9..c222613fb7 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml @@ -13,6 +13,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml index 176664e738..590de9dd47 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/role.yaml index 31ec4dfe9a..a92c85fadb 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/role.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/role.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" rules: - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/rolebinding.yaml index 1631dc3225..24eb89af4b 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/rolebinding.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 
d36f1615e0..6f75d68b8e 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -36,6 +37,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml index 782d55958c..9493f5b735 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/runtime-configmap.yaml index becb1113a4..1a983bea02 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/runtime-configmap.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-configmap-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: runtime.yaml: | diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/serviceaccount.yaml index 8bd6fdacfc..8482640f5d 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -11,3 +11,4 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml index c8ea516450..61799390d4 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/part-of: memberlist app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: selector: matchLabels: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml 
b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index a53b6ea521..f1ca249ce6 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml index 639b8f8865..2461482ba5 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml index c78fb5b342..81bcfdc766 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml index 503cdf6445..e8424afc00 100644 --- a/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/tests/test-enterprise-configmap-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": post-install + namespace: "citestns" spec: backoffLimit: 6 completions: 1 @@ -25,6 +26,7 @@ spec: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: tokengen + namespace: "citestns" spec: serviceAccountName: test-enterprise-configmap-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml index 6eec839fae..905a2c02f0 100644 --- 
a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-enterprise-values-mimir-admin-api + namespace: "citestns" spec: replicas: 1 selector: @@ -35,6 +36,7 @@ spec: app.kubernetes.io/component: admin-api app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml index 490efd76b6..dfed6af3bb 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/admin-api/admin-api-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 64c5225d58..6179e25472 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -33,6 +34,7 @@ spec: app.kubernetes.io/component: alertmanager app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml index ed71e27e0f..219058399a 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml index c775972920..85f0000600 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: 
type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index 8cb4eca2f0..dc965e60d0 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -33,6 +34,7 @@ spec: app.kubernetes.io/component: compactor app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml index 84446ec3b2..d27ff2d0d4 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index 7abcb00136..3dc9a87c3d 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -35,6 +36,7 @@ spec: app.kubernetes.io/component: distributor app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml index 6fab05af4c..98f895f22c 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml index 3b5cb73c8c..c3ee413c67 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml +++ 
b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml index 3ad9f8ebf6..ae7da39183 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-enterprise-values-mimir-gateway + namespace: "citestns" spec: replicas: 1 selector: @@ -33,6 +34,7 @@ spec: app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: gateway annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml index 4ab6feb40f..58c21d92cd 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gateway/gateway-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml index 2cf9208045..cd0ccedff6 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -10,6 +10,7 @@ metadata: app.kubernetes.io/component: gossip-ring app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml index 2c3c676902..405f652399 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/part-of: memberlist app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: selector: matchLabels: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 0416de3aed..bc145f6d76 100644 --- 
a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: podManagementPolicy: Parallel replicas: 3 @@ -34,6 +35,7 @@ spec: app.kubernetes.io/component: ingester app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml index 1a3d744b88..560bef4351 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml index 7d1426c14c..61d2afbc89 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/license-secret.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/license-secret.yaml index 5dcbd5397d..484670acdc 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/license-secret.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/license-secret.yaml @@ -9,5 +9,6 @@ metadata: app.kubernetes.io/instance: test-enterprise-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: license.jwt: Tk9UQVZBTElETElDRU5TRQ== diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/mimir-config.yaml index 50c3f1dde0..6a4788a196 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/mimir-config.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -9,5 +9,6 @@ metadata: app.kubernetes.io/instance: test-enterprise-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: mimir.yaml: 
YWN0aXZpdHlfdHJhY2tlcjoKICBmaWxlcGF0aDogL2RhdGEvbWV0cmljcy1hY3Rpdml0eS5sb2cKYWRtaW5fYXBpOgogIGxlYWRlcl9lbGVjdGlvbjoKICAgIGVuYWJsZWQ6IHRydWUKICAgIHJpbmc6CiAgICAgIGt2c3RvcmU6CiAgICAgICAgc3RvcmU6IG1lbWJlcmxpc3QKYWRtaW5fY2xpZW50OgogIHN0b3JhZ2U6CiAgICBzMzoKICAgICAgYWNjZXNzX2tleV9pZDogZ3JhZmFuYS1taW1pcgogICAgICBidWNrZXRfbmFtZTogZW50ZXJwcmlzZS1tZXRyaWNzLWFkbWluCiAgICAgIGVuZHBvaW50OiB0ZXN0LWVudGVycHJpc2UtdmFsdWVzLW1pbmlvLmNpdGVzdG5zLnN2Yzo5MDAwCiAgICAgIGluc2VjdXJlOiB0cnVlCiAgICAgIHNlY3JldF9hY2Nlc3Nfa2V5OiBzdXBlcnNlY3JldAogICAgdHlwZTogczMKYWxlcnRtYW5hZ2VyOgogIGRhdGFfZGlyOiAvZGF0YQogIGVuYWJsZV9hcGk6IHRydWUKICBleHRlcm5hbF91cmw6IC9hbGVydG1hbmFnZXIKYWxlcnRtYW5hZ2VyX3N0b3JhZ2U6CiAgYmFja2VuZDogczMKICBzMzoKICAgIGFjY2Vzc19rZXlfaWQ6IGdyYWZhbmEtbWltaXIKICAgIGJ1Y2tldF9uYW1lOiBtaW1pci1ydWxlcgogICAgZW5kcG9pbnQ6IHRlc3QtZW50ZXJwcmlzZS12YWx1ZXMtbWluaW8uY2l0ZXN0bnMuc3ZjOjkwMDAKICAgIGluc2VjdXJlOiB0cnVlCiAgICBzZWNyZXRfYWNjZXNzX2tleTogc3VwZXJzZWNyZXQKYXV0aDoKICB0eXBlOiBlbnRlcnByaXNlCmJsb2Nrc19zdG9yYWdlOgogIGJhY2tlbmQ6IHMzCiAgYnVja2V0X3N0b3JlOgogICAgc3luY19kaXI6IC9kYXRhL3RzZGItc3luYwogIHMzOgogICAgYWNjZXNzX2tleV9pZDogZ3JhZmFuYS1taW1pcgogICAgYnVja2V0X25hbWU6IG1pbWlyLXRzZGIKICAgIGVuZHBvaW50OiB0ZXN0LWVudGVycHJpc2UtdmFsdWVzLW1pbmlvLmNpdGVzdG5zLnN2Yzo5MDAwCiAgICBpbnNlY3VyZTogdHJ1ZQogICAgc2VjcmV0X2FjY2Vzc19rZXk6IHN1cGVyc2VjcmV0CiAgdHNkYjoKICAgIGRpcjogL2RhdGEvdHNkYgpjbHVzdGVyX25hbWU6IHRlc3QtZW50ZXJwcmlzZS12YWx1ZXMKY29tcGFjdG9yOgogIGRhdGFfZGlyOiAvZGF0YQpmcm9udGVuZDoKICBhbGlnbl9xdWVyaWVzX3dpdGhfc3RlcDogdHJ1ZQogIGxvZ19xdWVyaWVzX2xvbmdlcl90aGFuOiAxMHMKZnJvbnRlbmRfd29ya2VyOgogIGZyb250ZW5kX2FkZHJlc3M6IHRlc3QtZW50ZXJwcmlzZS12YWx1ZXMtbWltaXItcXVlcnktZnJvbnRlbmQtaGVhZGxlc3MuY2l0ZXN0bnMuc3ZjOjkwOTUKZ2F0ZXdheToKICBwcm94eToKICAgIGFkbWluX2FwaToKICAgICAgdXJsOiBodHRwOi8vdGVzdC1lbnRlcnByaXNlLXZhbHVlcy1taW1pci1hZG1pbi1hcGkuY2l0ZXN0bnMuc3ZjOjgwODAKICAgIGFsZXJ0bWFuYWdlcjoKICAgICAgdXJsOiBodHRwOi8vdGVzdC1lbnRlcnByaXNlLXZhbHVlcy1taW1pci1hbGVydG1hbmFnZXIuY2l0ZXN0bnMuc3ZjOjgwODAKICAgIGNvbXBhY3RvcjoKICAgICAgdXJsOiBodHRwOi8vdGVzdC1lbnRlcnByaXNlLXZhbHVlcy1taW1pci1jb21wYWN0b3IuY2l0ZXN0bnMuc3ZjOjgwODAKICAgIGRlZmF1bHQ6CiAgICAgIHVybDogaHR0cDovL3Rlc3QtZW50ZXJwcmlzZS12YWx1ZXMtbWltaXItYWRtaW4tYXBpLmNpdGVzdG5zLnN2Yzo4MDgwCiAgICBkaXN0cmlidXRvcjoKICAgICAgdXJsOiBkbnM6Ly8vdGVzdC1lbnRlcnByaXNlLXZhbHVlcy1taW1pci1kaXN0cmlidXRvci1oZWFkbGVzcy5jaXRlc3Rucy5zdmMuY2x1c3Rlci5sb2NhbDo5MDk1CiAgICBpbmdlc3RlcjoKICAgICAgdXJsOiBodHRwOi8vdGVzdC1lbnRlcnByaXNlLXZhbHVlcy1taW1pci1pbmdlc3Rlci5jaXRlc3Rucy5zdmM6ODA4MAogICAgcXVlcnlfZnJvbnRlbmQ6CiAgICAgIHVybDogaHR0cDovL3Rlc3QtZW50ZXJwcmlzZS12YWx1ZXMtbWltaXItcXVlcnktZnJvbnRlbmQuY2l0ZXN0bnMuc3ZjOjgwODAKICAgIHJ1bGVyOgogICAgICB1cmw6IGh0dHA6Ly90ZXN0LWVudGVycHJpc2UtdmFsdWVzLW1pbWlyLXJ1bGVyLmNpdGVzdG5zLnN2Yzo4MDgwCiAgICBzdG9yZV9nYXRld2F5OgogICAgICB1cmw6IGh0dHA6Ly90ZXN0LWVudGVycHJpc2UtdmFsdWVzLW1pbWlyLXN0b3JlLWdhdGV3YXkuY2l0ZXN0bnMuc3ZjOjgwODAKaW5nZXN0ZXI6CiAgcmluZzoKICAgIGZpbmFsX3NsZWVwOiAwcwogICAgbnVtX3Rva2VuczogNTEyCiAgICB1bnJlZ2lzdGVyX29uX3NodXRkb3duOiBmYWxzZQppbmdlc3Rlcl9jbGllbnQ6CiAgZ3JwY19jbGllbnRfY29uZmlnOgogICAgbWF4X3JlY3ZfbXNnX3NpemU6IDEwNDg1NzYwMAogICAgbWF4X3NlbmRfbXNnX3NpemU6IDEwNDg1NzYwMAppbnN0cnVtZW50YXRpb246CiAgZGlzdHJpYnV0b3JfY2xpZW50OgogICAgYWRkcmVzczogZG5zOi8vL3Rlc3QtZW50ZXJwcmlzZS12YWx1ZXMtbWltaXItZGlzdHJpYnV0b3ItaGVhZGxlc3MuY2l0ZXN0bnMuc3ZjLmNsdXN0ZXIubG9jYWw6OTA5NQogIGVuYWJsZWQ6IHRydWUKbGljZW5zZToKICBwYXRoOiAvbGljZW5zZS9saWNlbnNlLmp3dApsaW1pdHM6IHt9Cm1lbWJlcmxpc3Q6CiAgYWJvcnRfaWZfY2x1c3Rlcl9qb2luX2ZhaWxzOiBmYWxzZQogIGNvbXByZXNzaW9uX2VuYWJsZWQ6IGZhbHNlCiAgam9pbl9tZW1iZXJzOgogIC0gdGVzdC1lbnRlcnByaXNlLXZhbHVlcy1taW1pci1nb3NzaXAtcml
uZwpydWxlcjoKICBhbGVydG1hbmFnZXJfdXJsOiBkbnNzcnZub2EraHR0cDovL19odHRwLW1ldHJpY3MuX3RjcC50ZXN0LWVudGVycHJpc2UtdmFsdWVzLW1pbWlyLWFsZXJ0bWFuYWdlci1oZWFkbGVzcy5jaXRlc3Rucy5zdmMuY2x1c3Rlci5sb2NhbC9hbGVydG1hbmFnZXIKICBlbmFibGVfYXBpOiB0cnVlCiAgcnVsZV9wYXRoOiAvZGF0YQpydWxlcl9zdG9yYWdlOgogIGJhY2tlbmQ6IHMzCiAgczM6CiAgICBhY2Nlc3Nfa2V5X2lkOiBncmFmYW5hLW1pbWlyCiAgICBidWNrZXRfbmFtZTogbWltaXItcnVsZXIKICAgIGVuZHBvaW50OiB0ZXN0LWVudGVycHJpc2UtdmFsdWVzLW1pbmlvLmNpdGVzdG5zLnN2Yzo5MDAwCiAgICBpbnNlY3VyZTogdHJ1ZQogICAgc2VjcmV0X2FjY2Vzc19rZXk6IHN1cGVyc2VjcmV0CnJ1bnRpbWVfY29uZmlnOgogIGZpbGU6IC92YXIvbWltaXIvcnVudGltZS55YW1sCnNlcnZlcjoKICBncnBjX3NlcnZlcl9tYXhfY29uY3VycmVudF9zdHJlYW1zOiAxMDAwCiAgZ3JwY19zZXJ2ZXJfbWF4X3JlY3ZfbXNnX3NpemU6IDEwNDg1NzYwMAogIGdycGNfc2VydmVyX21heF9zZW5kX21zZ19zaXplOiAxMDQ4NTc2MDA= diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index ec56f13de7..609166400e 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-enterprise-values-mimir-overrides-exporter + namespace: "citestns" spec: replicas: 1 selector: @@ -33,6 +34,7 @@ spec: app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: overrides-exporter annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml index 63c1a1bf7a..f458277d3b 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml index 8f8bbf9ab4..0b38fb74a9 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: privileged: false allowPrivilegeEscalation: false diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index 743d155cad..5b4993da2d 100644 --- 
a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 2 selector: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-svc.yaml index 02218c4077..9389b24772 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index f6e68eb670..2df79d7117 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -33,6 +34,7 @@ spec: app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: query-frontend annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml index d780f9004f..443e5a0dfd 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml @@ -13,6 +13,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml index 6e876644fd..eb188dd9f2 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/role.yaml index 815dff2762..c2a140445c 100644 --- 
a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/role.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/role.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" rules: - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/rolebinding.yaml index 2c22b7d063..f91362f24f 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/rolebinding.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 72ea32258d..1039ab46d8 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -35,6 +36,7 @@ spec: app.kubernetes.io/component: ruler app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml index 0644ed413e..3bbd2f473c 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/runtime-configmap.yaml index 8147fbfdce..5fe9e6e7e0 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/runtime-configmap.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-enterprise-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: runtime.yaml: | diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/serviceaccount.yaml index 9dc0ebfc0f..da972c4c80 100644 --- 
a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -11,3 +11,4 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml index 1d8a9a0299..4482ed7ef7 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/part-of: memberlist app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: selector: matchLabels: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 0622da8e20..438940bc6c 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -33,6 +34,7 @@ spec: app.kubernetes.io/component: store-gateway app.kubernetes.io/part-of: memberlist annotations: + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml index f59ee8f60e..3c392875a0 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml index ba5ad1dfae..5fdb654b7c 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml 
b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml index d01f9abc26..c6cf327d49 100644 --- a/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml +++ b/operations/helm/tests/test-enterprise-values-generated/mimir-distributed/templates/tokengen/tokengen-job.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: "helm.sh/hook": post-install + namespace: "citestns" spec: backoffLimit: 6 completions: 1 @@ -25,6 +26,7 @@ spec: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: tokengen + namespace: "citestns" spec: serviceAccountName: test-enterprise-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml index 38bab765b8..1e91edc521 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml index 4495fca80a..30716c12b4 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml index d45b5b52a2..2cd0defafe 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/alertmanager/alertmanager-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml index 9c3e073b88..321223ed45 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: 
{} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml index 8d6e22b4d9..33dee0802f 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/compactor/compactor-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml index cd5cc73e68..3722bba8d0 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -36,6 +37,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml index ca4fc10b5f..6c8e803869 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml index e3ada101b3..0c2cd119d3 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/distributor/distributor-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml index 4feb023f15..10137d0521 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/gossip-ring/gossip-ring-svc.yaml @@ -10,6 +10,7 @@ metadata: app.kubernetes.io/component: 
gossip-ring app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml index e10039124c..0b658b8b11 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-pdb.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/part-of: memberlist app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: selector: matchLabels: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 8889b57812..70bed06bce 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: podManagementPolicy: Parallel replicas: 3 @@ -35,6 +36,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml index e4a89c3dda..0b36f31b31 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml index 7fcac54b0a..1a45428b81 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ingester/ingester-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml index 9e4acf9b09..6df56c5260 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-oss-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: mimir.yaml: | diff --git 
a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/minio-secrets.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/minio-secrets.yaml index a1693143ea..f8e72387d3 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/minio-secrets.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/minio-secrets.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-oss-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" type: Opaque data: MINIO_ACCESS_KEY_ID: Z3JhZmFuYS1taW1pcg== diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml index 69a18ad74d..54c5b2f4ad 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml @@ -10,6 +10,7 @@ metadata: app.kubernetes.io/component: nginx app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: nginx.conf: | worker_processes 5; ## Default: 1 diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml index 7c9a547c1d..2b5319f8a4 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 strategy: @@ -31,6 +32,7 @@ spec: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm app.kubernetes.io/component: nginx + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml index 819f3b4b94..70b3140c13 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 597e29c8f2..fc9c69c1c0 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm name: test-oss-values-mimir-overrides-exporter + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/component: overrides-exporter 
annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml index 8aaffcc6fb..1f5d330e22 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/overrides-exporter/overrides-exporter-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml index 4af0af992b..9f52f380c4 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/podsecuritypolicy.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-oss-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: privileged: false allowPrivilegeEscalation: false diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml index c3324c49c0..89b39ecb46 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 2 selector: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-svc.yaml index a6f2692502..0aea263961 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/querier/querier-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index eb1fb0ddf6..4b3b2d2ab0 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/component: query-frontend annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git 
a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml index a151afe04f..44d5889241 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc-headless.yaml @@ -13,6 +13,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml index a10ba83d98..884a0d30e3 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/query-frontend/query-frontend-svc.yaml @@ -12,6 +12,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/role.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/role.yaml index c4005a2043..7fe2344f68 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/role.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/role.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-oss-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" rules: - apiGroups: ['extensions'] resources: ['podsecuritypolicies'] diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/rolebinding.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/rolebinding.yaml index 7ee12297fb..ca2c9cfbc7 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/rolebinding.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/rolebinding.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-oss-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" roleRef: apiGroup: rbac.authorization.k8s.io kind: Role diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml index 612db7586e..a548446829 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -36,6 +37,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml 
b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml index 6778b969f5..3f4aa81c34 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/ruler/ruler-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/runtime-configmap.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/runtime-configmap.yaml index 0093cc8f19..136cf6ed11 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/runtime-configmap.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/runtime-configmap.yaml @@ -9,6 +9,7 @@ metadata: app.kubernetes.io/instance: test-oss-values app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" data: runtime.yaml: | diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/serviceaccount.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/serviceaccount.yaml index ac87d8afe5..79dacaa3b3 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/serviceaccount.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/serviceaccount.yaml @@ -11,3 +11,4 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml index e55c6b6a33..73c4c8f2c6 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-pdb.yaml @@ -11,6 +11,7 @@ metadata: app.kubernetes.io/part-of: memberlist app.kubernetes.io/version: "2.1.0" app.kubernetes.io/managed-by: Helm + namespace: "citestns" spec: selector: matchLabels: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index 792f1c93b3..e2e23fd62a 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: replicas: 1 selector: @@ -34,6 +35,7 @@ spec: app.kubernetes.io/part-of: memberlist annotations: minio-secret-version: "42" + namespace: "citestns" spec: serviceAccountName: test-oss-values-mimir securityContext: diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml index be80da36fa..4a01afa846 100644 --- 
a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc-headless.yaml @@ -14,6 +14,7 @@ metadata: prometheus.io/service-monitor: "false" annotations: {} + namespace: "citestns" spec: type: ClusterIP clusterIP: None diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml index d3006dee8f..34d3fa94ab 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/store-gateway/store-gateway-svc.yaml @@ -13,6 +13,7 @@ metadata: app.kubernetes.io/managed-by: Helm annotations: {} + namespace: "citestns" spec: type: ClusterIP ports: From ce2b146e835060000c418a84445361d9c3b11c99 Mon Sep 17 00:00:00 2001 From: Joshua Hesketh Date: Tue, 21 Jun 2022 22:40:29 +1000 Subject: [PATCH 18/63] Docs: Update mixin building instructions (#2163) * Docs: Update mixin building instructions The instructions for building mixin's inside of docker referred to the wrong steps. * Docs: BUILD_IN_CONTAINER is used by default No need to have separate instructions on how to build mixin's in a container as it is the default method. Also therefore remove the unnecessary dependencies. --- .../installing-dashboards-and-alerts.md | 19 ++++--------------- 1 file changed, 4 insertions(+), 15 deletions(-) diff --git a/docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md b/docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md index 37709e3e4c..44260b3338 100644 --- a/docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md +++ b/docs/sources/operators-guide/monitoring-grafana-mimir/installing-dashboards-and-alerts.md @@ -37,24 +37,13 @@ If you choose this option, you can change the configuration to match your deploy git clone https://github.com/grafana/mimir.git ``` 2. Review the mixin configuration at `operations/mimir-mixin/config.libsonnet`, and apply your changes if necessary. -3. Install dependencies: - ```bash - go install github.com/monitoring-mixins/mixtool/cmd/mixtool@latest - go install github.com/jsonnet-bundler/jsonnet-bundler/cmd/jb@latest - ``` -4. Compile the mixin +3. Compile the mixin ```bash make build-mixin ``` -5. Import the dashboards saved at `operations/mimir-mixin-compiled/dashboards/` in [Grafana](https://grafana.com/docs/grafana/latest/dashboards/export-import/#import-dashboard) -6. Install the recording rules saved at `operations/mimir-mixin-compiled/rules.yaml` in your Prometheus -7. Install the alerts saved at `operations/mimir-mixin-compiled/alerts.yaml` in your Prometheus - -Alternatively, you can compile the mixin in a container. To do so, replace the previous two steps with the following command: - -```bash -make BUILD_IN_CONTAINER=true build-mixin -``` +4. Import the dashboards saved at `operations/mimir-mixin-compiled/dashboards/` in [Grafana](https://grafana.com/docs/grafana/latest/dashboards/export-import/#import-dashboard) +5. Install the recording rules saved at `operations/mimir-mixin-compiled/rules.yaml` in your Prometheus +6. 
Install the alerts saved at `operations/mimir-mixin-compiled/alerts.yaml` in your Prometheus ## Install dashboards from Jsonnet mixin From 779f277991cdc37624096af743a6c535d7dbd1c4 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Tue, 21 Jun 2022 15:13:35 +0200 Subject: [PATCH 19/63] Jsonnet: Fix disabling shuffle sharding on the read path whilst keeping it enabled on write path. (#2164) The Jsonnet allows for the disabling of shuffle sharding on the read path whilst keeping it enabled on the write path. However, the correct command line arguments are not being passed to queriers for this to work as advertised. --- CHANGELOG.md | 1 + ...sharding-read-path-disabled-generated.yaml | 1781 +++++++++++++++++ ...huffle-sharding-read-path-disabled.jsonnet | 29 + operations/mimir/shuffle-sharding.libsonnet | 5 + 4 files changed, 1816 insertions(+) create mode 100644 operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml create mode 100644 operations/mimir-tests/test-shuffle-sharding-read-path-disabled.jsonnet diff --git a/CHANGELOG.md b/CHANGELOG.md index 4432955817..47c1a75c6d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -78,6 +78,7 @@ * [FEATURE] Jsonnet: Add support for ruler remote evaluation mode (`ruler_remote_evaluation_enabled`), which deploys and uses a dedicated query path for rule evaluation. This enables the benefits of the query-frontend for rule evaluation, such as query sharding. #2073 * [ENHANCEMENT] Added `compactor` service, that can be used to route requests directly to compactor (e.g. admin UI). #2063 * [ENHANCEMENT] Added a `consul_enabled` configuration option to provide the ability to disable consul. It is automatically set to false when `memberlist_ring_enabled` is true and `multikv_migration_enabled` (used for migration from Consul to memberlist) is not set. #2093 #2152 +* [BUGFIX] Querier: Fix disabling shuffle sharding on the read path whilst keeping it enabled on write path. 
#2164 ### Mimirtool diff --git a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml new file mode 100644 index 0000000000..f879e5005d --- /dev/null +++ b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml @@ -0,0 +1,1781 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: 
consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: 
consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries 
+--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: 
client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ingestion-tenant-shard-size=3 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 
4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -querier.shuffle-sharding-ingesters-enabled=false + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.tenant-shard-size=3 + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - 
-query-frontend.max-queriers-per-tenant=10 + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.max-queriers-per-tenant=10 + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - 
-distributor.ingestion-tenant-shard-size=3 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -querier.shuffle-sharding-ingesters-enabled=false + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.store=consul + - -ruler.rule-path=/rules + - -ruler.tenant-shard-size=2 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.tenant-shard-size=3 + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 
100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -distributor.ingestion-tenant-shard-size=3 + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - 
-runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: 
kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - 
-blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -store-gateway.tenant-shard-size=3 + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled.jsonnet b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled.jsonnet new file mode 100644 index 0000000000..bfbf04f6ea --- /dev/null +++ b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled.jsonnet @@ -0,0 +1,29 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + shuffle_sharding+:: { + 
ingester_write_path_enabled: true, + ingester_read_path_enabled: false, + querier_enabled: true, + ruler_enabled: true, + store_gateway_enabled: true, + }, + }, +} diff --git a/operations/mimir/shuffle-sharding.libsonnet b/operations/mimir/shuffle-sharding.libsonnet index 9c863e117a..4dfa9a3cb8 100644 --- a/operations/mimir/shuffle-sharding.libsonnet +++ b/operations/mimir/shuffle-sharding.libsonnet @@ -100,6 +100,11 @@ if !$._config.shuffle_sharding.store_gateway_enabled then {} else { 'store-gateway.tenant-shard-size': $._config.shuffle_sharding.store_gateway_shard_size, } + ) + ( + if !($._config.shuffle_sharding.ingester_write_path_enabled && !$._config.shuffle_sharding.ingester_read_path_enabled) then {} else { + // If shuffle sharding is enabled for the write path but isn't enabled for the read path, Mimir will query all ingesters + 'querier.shuffle-sharding-ingesters-enabled': 'false', + } ) + ( if !$._config.shuffle_sharding.ingester_read_path_enabled then {} else { 'distributor.ingestion-tenant-shard-size': $._config.shuffle_sharding.ingester_shard_size, From 0ef94db02f838c72c8d5e19afec4d57ac591a32a Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Tue, 21 Jun 2022 15:22:11 +0200 Subject: [PATCH 20/63] Canceled distributor push reqs returning 499 (#2157) * Canceled distributor push reqs returning 499 Canceled requests are not internal errors of the service, and shouldn't be accounted or logged as such. Fixes https://github.com/grafana/mimir/issues/2156 Signed-off-by: Oleg Zaytsev * Update CHANGELOG.md Signed-off-by: Oleg Zaytsev --- CHANGELOG.md | 1 + pkg/distributor/distributor_test.go | 28 ++++++++++++++++++++++++++++ pkg/util/push/push.go | 7 +++++++ pkg/util/push/push_test.go | 13 +++++++++++++ 4 files changed, 49 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 47c1a75c6d..18333d6eeb 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -44,6 +44,7 @@ * [BUGFIX] Limits: Active series custom tracker configuration has been named back from `active_series_custom_trackers_config` to `active_series_custom_trackers`. For backwards compatibility both version is going to be supported for until Mimir v2.4. When both fields are specified, `active_series_custom_trackers_config` takes precedence over `active_series_custom_trackers`. #2101 * [BUGFIX] Ingester: fixed the order of labels applied when incrementing the `cortex_discarded_metadata_total` metric. #2096 * [BUGFIX] Ingester: fixed bug where retrieving metadata for a metric with multiple metadata entries would return multiple copies of a single metadata entry rather than all available entries. #2096 +* [BUGFIX] Distributor: canceled requests are no longer accounted as internal errors. 
#2157 ### Mixin diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 006c6d1791..6c6691a5de 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -305,6 +305,34 @@ func TestDistributor_Push(t *testing.T) { } } +func TestDistributor_ContextCanceledRequest(t *testing.T) { + now := time.Now() + mtime.NowForce(now) + t.Cleanup(mtime.NowReset) + + ds, ings, _ := prepare(t, prepConfig{ + numIngesters: 3, + happyIngesters: 3, + numDistributors: 1, + }) + + // Lock all mockIngester instances, so they will be waiting + for i := range ings { + ings[i].Lock() + defer func(ing *mockIngester) { + ing.Unlock() + }(&ings[i]) + } + + ctx := user.InjectOrgID(context.Background(), "user") + ctx, cancel := context.WithCancel(ctx) + cancel() + request := makeWriteRequest(123456789000, 1, 1, false) + _, err := ds[0].Push(ctx, request) + require.Error(t, err) + require.ErrorIs(t, err, context.Canceled) +} + func TestDistributor_MetricsCleanup(t *testing.T) { dists, _, regs := prepare(t, prepConfig{ numDistributors: 1, diff --git a/pkg/util/push/push.go b/pkg/util/push/push.go index 3b1197fc27..0ffcef4ff0 100644 --- a/pkg/util/push/push.go +++ b/pkg/util/push/push.go @@ -7,6 +7,7 @@ package push import ( "context" + "errors" "net/http" "sync" @@ -32,6 +33,7 @@ var bufferPool = sync.Pool{ } const SkipLabelNameValidationHeader = "X-Mimir-SkipLabelNameValidation" +const statusClientClosedRequest = 499 // Handler is a http.Handler which accepts WriteRequests. func Handler( @@ -80,6 +82,11 @@ func Handler( } if _, err := push(ctx, &req.WriteRequest, cleanup); err != nil { + if errors.Is(err, context.Canceled) { + http.Error(w, err.Error(), statusClientClosedRequest) + level.Warn(logger).Log("msg", "push request canceled", "err", err) + return + } resp, ok := httpgrpc.HTTPResponseFromError(err) if !ok { http.Error(w, err.Error(), http.StatusInternalServerError) diff --git a/pkg/util/push/push_test.go b/pkg/util/push/push_test.go index 654ad8c580..9999facaaa 100644 --- a/pkg/util/push/push_test.go +++ b/pkg/util/push/push_test.go @@ -8,6 +8,7 @@ package push import ( "bytes" "context" + "fmt" "net/http" "net/http/httptest" "testing" @@ -39,6 +40,18 @@ func TestHandler_cortexWriteRequest(t *testing.T) { assert.Equal(t, 200, resp.Code) } +func TestHandler_contextCanceledRequest(t *testing.T) { + req := createRequest(t, createMimirWriteRequestProtobuf(t, false)) + resp := httptest.NewRecorder() + sourceIPs, _ := middleware.NewSourceIPs("SomeField", "(.*)") + handler := Handler(100000, sourceIPs, false, func(_ context.Context, _ *mimirpb.WriteRequest, cleanup func()) (*mimirpb.WriteResponse, error) { + defer cleanup() + return nil, fmt.Errorf("the request failed: %w", context.Canceled) + }) + handler.ServeHTTP(resp, req) + assert.Equal(t, 499, resp.Code) +} + func TestHandler_EnsureSkipLabelNameValidationBehaviour(t *testing.T) { tests := []struct { name string From 722705abf1cbebeee5502dc5dbead31cf6447764 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 21 Jun 2022 15:46:43 +0200 Subject: [PATCH 21/63] jsonnet: make memberlist the default KV store (#2161) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Make memberlist the default KV store. 
Signed-off-by: Peter Štibraný * CHANGELOG.md Signed-off-by: Peter Štibraný --- CHANGELOG.md | 1 + .../test-autoscaling-generated.yaml | 512 +--- ...jsonnet => test-consul-multi-zone.jsonnet} | 3 +- ...net => test-consul-ruler-disabled.jsonnet} | 3 +- ...est-gossip.jsonnet => test-consul.jsonnet} | 3 +- .../mimir-tests/test-defaults-generated.yaml | 487 +--- ...est-disable-chunk-streaming-generated.yaml | 512 +--- .../mimir-tests/test-gossip-generated.yaml | 1426 ----------- .../test-gossip-multi-zone-generated.yaml | 2161 ----------------- .../test-gossip-ruler-disabled-generated.yaml | 1315 ---------- .../test-multi-zone-generated.yaml | 560 +---- ...zone-with-ongoing-migration-generated.yaml | 584 ++--- .../test-query-sharding-generated.yaml | 512 +--- ...est-ruler-remote-evaluation-generated.yaml | 526 +--- ...remote-evaluation-migration-generated.yaml | 526 +--- .../test-shuffle-sharding-generated.yaml | 512 +--- .../test-storage-azure-generated.yaml | 512 +--- .../test-storage-gcs-generated.yaml | 512 +--- .../test-storage-s3-generated.yaml | 512 +--- operations/mimir/memberlist.libsonnet | 2 +- 20 files changed, 1119 insertions(+), 10062 deletions(-) rename operations/mimir-tests/{test-gossip-multi-zone.jsonnet => test-consul-multi-zone.jsonnet} (90%) rename operations/mimir-tests/{test-gossip-ruler-disabled.jsonnet => test-consul-ruler-disabled.jsonnet} (86%) rename operations/mimir-tests/{test-gossip.jsonnet => test-consul.jsonnet} (87%) delete mode 100644 operations/mimir-tests/test-gossip-generated.yaml delete mode 100644 operations/mimir-tests/test-gossip-multi-zone-generated.yaml delete mode 100644 operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 18333d6eeb..6d308e5007 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -71,6 +71,7 @@ * [CHANGE] Remove use of `-querier.query-store-after`, `-querier.shuffle-sharding-ingesters-lookback-period`, `-blocks-storage.bucket-store.ignore-blocks-within`, and `-blocks-storage.tsdb.close-idle-tsdb-timeout` CLI flags since the values now match defaults. #1915 #1921 * [CHANGE] Change default value for `-blocks-storage.bucket-store.chunks-cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 * [CHANGE] The `memberlist_ring_enabled` configuration now applies to Alertmanager. #2102 +* [CHANGE] Default value for `memberlist_ring_enabled` is now true. It means that all hash rings use Memberlist as default KV store instead of Consul (previous default). #2161 * [FEATURE] Added querier autoscaling support. It requires [KEDA](https://keda.sh) installed in the Kubernetes cluster and query-scheduler enabled in the Mimir cluster. Querier autoscaler can be enabled and configure through the following options in the jsonnet config: #2013 #2023 * `autoscaling_querier_enabled`: `true` to enable autoscaling. * `autoscaling_querier_min_replicas`: minimum number of querier replicas. 
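For deployments that want to keep Consul after this change, a minimal opt-out sketch in the style of the mimir-tests configs (this assumes the existing `memberlist_ring_enabled` `_config` option referenced in the changelog; the renamed test-consul.jsonnet in this patch exercises the same override, though its exact contents are not reproduced here, and a real config would also carry the storage settings omitted below):

  local mimir = import 'mimir/mimir.libsonnet';

  mimir {
    _config+:: {
      namespace: 'default',
      external_url: 'http://test',
      // Keep Consul as the KV store for all hash rings instead of the new
      // memberlist default introduced by this change.
      memberlist_ring_enabled: false,
    },
  }
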
diff --git a/operations/mimir-tests/test-autoscaling-generated.yaml b/operations/mimir-tests/test-autoscaling-generated.yaml index 75fdfde2ce..3fa65be848 100644 --- a/operations/mimir-tests/test-autoscaling-generated.yaml +++ b/operations/mimir-tests/test-autoscaling-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - 
match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick 
-subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: 
IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -776,15 +404,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -801,6 +430,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -839,6 +470,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -859,12 +491,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -873,10 +507,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - 
-store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -890,6 +523,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1064,6 +699,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1084,27 +720,27 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1153,17 +789,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1182,6 +821,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1237,6 +878,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1253,13 +895,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1273,6 +917,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1329,6 +975,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1348,16 +995,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1372,6 +1021,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1640,6 +1291,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1677,14 +1329,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1696,6 +1350,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-gossip-multi-zone.jsonnet b/operations/mimir-tests/test-consul-multi-zone.jsonnet similarity index 90% rename from operations/mimir-tests/test-gossip-multi-zone.jsonnet rename to operations/mimir-tests/test-consul-multi-zone.jsonnet index 15b534a8d8..4e3a7fa46b 100644 --- a/operations/mimir-tests/test-gossip-multi-zone.jsonnet +++ b/operations/mimir-tests/test-consul-multi-zone.jsonnet @@ -5,7 +5,8 @@ mimir { namespace: 'default', external_url: 'http://test', - memberlist_ring_enabled: true, + // Use consul for hash rings. 
+ memberlist_ring_enabled: false, blocks_storage_backend: 'gcs', blocks_storage_bucket_name: 'blocks-bucket', diff --git a/operations/mimir-tests/test-gossip-ruler-disabled.jsonnet b/operations/mimir-tests/test-consul-ruler-disabled.jsonnet similarity index 86% rename from operations/mimir-tests/test-gossip-ruler-disabled.jsonnet rename to operations/mimir-tests/test-consul-ruler-disabled.jsonnet index a246e4e767..9edf652794 100644 --- a/operations/mimir-tests/test-gossip-ruler-disabled.jsonnet +++ b/operations/mimir-tests/test-consul-ruler-disabled.jsonnet @@ -5,7 +5,8 @@ mimir { namespace: 'default', external_url: 'http://test', - memberlist_ring_enabled: true, + // Use consul for hash rings. + memberlist_ring_enabled: false, blocks_storage_backend: 'gcs', blocks_storage_bucket_name: 'blocks-bucket', diff --git a/operations/mimir-tests/test-gossip.jsonnet b/operations/mimir-tests/test-consul.jsonnet similarity index 87% rename from operations/mimir-tests/test-gossip.jsonnet rename to operations/mimir-tests/test-consul.jsonnet index f3c9f33b2d..5caf68b53e 100644 --- a/operations/mimir-tests/test-gossip.jsonnet +++ b/operations/mimir-tests/test-consul.jsonnet @@ -5,7 +5,8 @@ mimir { namespace: 'default', external_url: 'http://test', - memberlist_ring_enabled: true, + // Use consul for hash rings. + memberlist_ring_enabled: false, blocks_storage_backend: 'gcs', blocks_storage_bucket_name: 'blocks-bucket', diff --git a/operations/mimir-tests/test-defaults-generated.yaml b/operations/mimir-tests/test-defaults-generated.yaml index fa91695fad..2d94c3455c 100644 --- a/operations/mimir-tests/test-defaults-generated.yaml +++ b/operations/mimir-tests/test-defaults-generated.yaml @@ -30,225 +30,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: 
consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: 
consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -257,38 +38,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -305,41 +54,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -354,11 +76,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -372,6 +112,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -466,6 +209,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -522,136 +268,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: 
e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -669,6 +293,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -688,15 +313,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -713,6 +339,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -752,6 +380,7 @@ 
spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -772,12 +401,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-address=query-frontend-discovery.default.svc.cluster.local:9095 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 @@ -786,10 +417,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -803,6 +433,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -911,6 +543,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -927,13 +560,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -947,6 +582,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1003,6 +640,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1022,16 +660,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - 
-server.grpc.keepalive.min-time-between-pings=10s @@ -1046,6 +686,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1314,6 +956,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1351,14 +994,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1370,6 +1015,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml b/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml index 626101efb9..761bc2582a 100644 --- a/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml +++ b/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - 
query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: 
consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | ingester_stream_chunks_when_using_blocks: false @@ -271,38 +52,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -319,6 +68,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -338,41 +90,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -387,11 +112,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -405,6 +148,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -499,6 +245,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -611,136 +360,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- 
apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -758,6 +385,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -777,15 +405,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - 
-server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -802,6 +431,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -841,6 +472,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -861,12 +493,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -875,10 +509,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -892,6 +525,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1066,6 +701,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1086,27 +722,27 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1155,17 +791,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - 
-alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1184,6 +823,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1239,6 +880,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1255,13 +897,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1275,6 +919,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1331,6 +977,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1350,16 +997,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1374,6 +1023,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1642,6 +1293,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1679,14 +1331,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml 
- -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1698,6 +1352,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-gossip-generated.yaml b/operations/mimir-tests/test-gossip-generated.yaml deleted file mode 100644 index 6ea4883c89..0000000000 --- a/operations/mimir-tests/test-gossip-generated.yaml +++ /dev/null @@ -1,1426 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: default ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: alertmanager-pdb - name: alertmanager-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: alertmanager ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: ingester-pdb - name: ingester-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: ingester ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: store-gateway-pdb - name: store-gateway-pdb - namespace: default -spec: - maxUnavailable: 2 - selector: - matchLabels: - name: store-gateway ---- -apiVersion: v1 -data: - overrides.yaml: | - overrides: {} -kind: ConfigMap -metadata: - name: overrides - namespace: default ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - clusterIP: None - ports: - - name: alertmanager-http-metrics - port: 8080 - targetPort: 8080 - - name: alertmanager-grpc - port: 9095 - targetPort: 9095 - - name: alertmanager-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: alertmanager ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - clusterIP: None - ports: - - name: compactor-http-metrics - port: 8080 - targetPort: 8080 - - name: compactor-grpc - port: 9095 - targetPort: 9095 - - name: compactor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: compactor ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: distributor - name: distributor - namespace: default -spec: - clusterIP: None - ports: - - name: distributor-http-metrics - port: 8080 - targetPort: 8080 - - name: distributor-grpc - port: 9095 - targetPort: 9095 - - name: distributor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: distributor ---- -apiVersion: v1 -kind: Service -metadata: - name: gossip-ring - namespace: default -spec: - clusterIP: None - ports: - - name: gossip-ring - port: 7946 - protocol: TCP - targetPort: 7946 - selector: - gossip_ring_member: "true" ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ingester - name: ingester - namespace: default -spec: - ports: - - name: ingester-http-metrics - port: 8080 - targetPort: 8080 - - name: ingester-grpc - port: 9095 - targetPort: 9095 - - name: ingester-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: ingester ---- 
-apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached - name: memcached - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-frontend - name: memcached-frontend - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-index-queries - name: memcached-index-queries - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-index-queries ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-metadata - name: memcached-metadata - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-metadata ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: querier - name: querier - namespace: default -spec: - ports: - - name: querier-http-metrics - port: 8080 - targetPort: 8080 - - name: querier-grpc - port: 9095 - targetPort: 9095 - - name: querier-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: querier ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend - namespace: default -spec: - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler - namespace: default -spec: - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ruler - name: ruler - namespace: default -spec: - ports: - - name: ruler-http-metrics - port: 8080 - targetPort: 8080 - - name: ruler-grpc - port: 9095 - targetPort: 9095 - selector: - name: ruler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway - name: store-gateway - namespace: default -spec: - ports: - - name: store-gateway-http-metrics - port: 8080 - targetPort: 8080 - - name: store-gateway-grpc - port: 9095 - targetPort: 9095 - - name: 
store-gateway-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: store-gateway ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: distributor - namespace: default -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: distributor - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: distributor - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: distributor - topologyKey: kubernetes.io/hostname - containers: - - args: - - -distributor.ha-tracker.enable=true - - -distributor.ha-tracker.enable-for-all-users=true - - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 - - -distributor.ha-tracker.prefix=prom_ha/ - - -distributor.ha-tracker.store=etcd - - -distributor.health-check-ingesters=true - - -distributor.ingestion-burst-size=200000 - - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.prefix= - - -distributor.ring.store=memberlist - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.max-connection-age=2m - - -server.grpc.keepalive.max-connection-age-grace=5m - - -server.grpc.keepalive.max-connection-idle=1m - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=distributor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: distributor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 4Gi - requests: - cpu: "2" - memory: 2Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: querier - namespace: default -spec: - minReadySeconds: 10 - replicas: 6 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: querier - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: querier - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: querier - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - 
-ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -querier.frontend-client.grpc-max-send-msg-size=104857600 - - -querier.max-concurrent=8 - - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store.max-query-length=768h - - -target=querier - env: - - name: JAEGER_REPORTER_MAX_QUEUE_SIZE - value: "1024" - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: querier - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 24Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-frontend - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-frontend - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-frontend.align-querier-with-step=false - - -query-frontend.cache-results=true - - -query-frontend.max-cache-freshness=10m - - -query-frontend.results-cache.backend=memcached - - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 - - -query-frontend.results-cache.memcached.timeout=500ms - - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store.max-query-length=12000h - - -target=query-frontend - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-frontend - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 1200Mi - requests: - cpu: "2" - memory: 600Mi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-scheduler - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-scheduler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-scheduler - 
spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-scheduler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-scheduler.max-outstanding-requests-per-tenant=100 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=query-scheduler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-scheduler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 2Gi - requests: - cpu: "2" - memory: 1Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ruler - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: ruler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: ruler - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: ruler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -ruler-storage.backend=gcs - - -ruler-storage.gcs.bucket-name=rules-bucket - - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - - -ruler.max-rule-groups-per-tenant=35 - - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.store=memberlist - - -ruler.rule-path=/rules - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store.max-query-length=768h - - -target=ruler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ruler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - cpu: "16" - memory: 16Gi - requests: - cpu: "1" - memory: 6Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - terminationGracePeriodSeconds: 600 - volumes: - - configMap: - name: overrides - name: overrides ---- 
-apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: alertmanager - serviceName: alertmanager - template: - metadata: - labels: - gossip_ring_member: "true" - name: alertmanager - spec: - containers: - - args: - - -alertmanager-storage.backend=gcs - - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=memberlist - - -alertmanager.storage.path=/data - - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=alertmanager - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: alertmanager - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 15Gi - requests: - cpu: "2" - memory: 10Gi - volumeMounts: - - mountPath: /data - name: alertmanager-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: alertmanager-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: compactor - serviceName: compactor - template: - metadata: - labels: - gossip_ring_member: "true" - name: compactor - spec: - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -compactor.block-ranges=2h,12h,24h - - -compactor.blocks-retention-period=0 - - -compactor.cleanup-interval=15m - - -compactor.compaction-concurrency=1 - - -compactor.compaction-interval=30m - - -compactor.compactor-tenant-shard-size=1 - - -compactor.data-dir=/data - - -compactor.deletion-delay=2h - - -compactor.max-closing-blocks-concurrency=2 - - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.prefix= - - -compactor.ring.store=memberlist - - -compactor.ring.wait-stability-min-duration=1m - - -compactor.split-and-merge-shards=0 - - -compactor.split-groups=1 - - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=compactor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: compactor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 
7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 6Gi - requests: - cpu: 1 - memory: 6Gi - volumeMounts: - - mountPath: /data - name: compactor-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: compactor-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 250Gi - storageClassName: standard ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: ingester - name: ingester - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 3 - selector: - matchLabels: - name: ingester - serviceName: ingester - template: - metadata: - labels: - gossip_ring_member: "true" - name: ingester - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: ingester - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -blocks-storage.tsdb.block-ranges-period=2h - - -blocks-storage.tsdb.dir=/data/tsdb - - -blocks-storage.tsdb.ship-interval=1m - - -distributor.health-check-ingesters=true - - -ingester.max-global-series-per-metric=20000 - - -ingester.max-global-series-per-user=150000 - - -ingester.ring.heartbeat-period=15s - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.num-tokens=512 - - -ingester.ring.prefix= - - -ingester.ring.readiness-check-ring-health=false - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.tokens-file-path=/data/tokens - - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc-max-concurrent-streams=10000 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=ingester - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ingester - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 25Gi - requests: - cpu: "4" - memory: 15Gi - volumeMounts: - - mountPath: /data - name: ingester-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 1200 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ingester-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi - storageClassName: fast ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached - serviceName: memcached - template: - metadata: - labels: - name: memcached - spec: - affinity: - podAntiAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 6144 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 9Gi - requests: - cpu: 500m - memory: 6552Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-frontend - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-frontend - serviceName: memcached-frontend - template: - metadata: - labels: - name: memcached-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 1024 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-index-queries - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-index-queries - serviceName: memcached-index-queries - template: - metadata: - labels: - name: memcached-index-queries - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-index-queries - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-metadata - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - name: memcached-metadata - serviceName: memcached-metadata - template: - metadata: - labels: - name: memcached-metadata - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-metadata - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 512 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 768Mi - requests: - cpu: 500m - memory: 715Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 
- image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: store-gateway - name: store-gateway - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 3 - selector: - matchLabels: - name: store-gateway - serviceName: store-gateway - template: - metadata: - labels: - gossip_ring_member: "true" - name: store-gateway - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: store-gateway - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.chunks-cache.backend=memcached - - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms - - -blocks-storage.bucket-store.index-cache.backend=memcached - - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 - - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true - - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m - - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - - -store-gateway.sharding-ring.wait-stability-min-duration=1m - - -target=store-gateway - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: store-gateway - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 
- name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 18Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /data - name: store-gateway-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 120 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: store-gateway-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi - storageClassName: standard ---- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - annotations: - etcd.database.coreos.com/scope: clusterwide - name: etcd - namespace: default -spec: - pod: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - etcd_cluster: etcd - topologyKey: kubernetes.io/hostname - annotations: - prometheus.io/port: "2379" - prometheus.io/scrape: "true" - etcdEnv: - - name: ETCD_AUTO_COMPACTION_RETENTION - value: 1h - labels: - name: etcd - resources: - limits: - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - size: 3 - version: 3.3.13 diff --git a/operations/mimir-tests/test-gossip-multi-zone-generated.yaml b/operations/mimir-tests/test-gossip-multi-zone-generated.yaml deleted file mode 100644 index 90b24fa612..0000000000 --- a/operations/mimir-tests/test-gossip-multi-zone-generated.yaml +++ /dev/null @@ -1,2161 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: default ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: alertmanager-pdb - name: alertmanager-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: alertmanager ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: ingester-rollout-pdb - name: ingester-rollout-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - rollout-group: ingester ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: store-gateway-rollout-pdb - name: store-gateway-rollout-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - rollout-group: store-gateway ---- -apiVersion: v1 -kind: ServiceAccount -metadata: - name: rollout-operator - namespace: default ---- -apiVersion: v1 -data: - overrides.yaml: | - overrides: {} -kind: ConfigMap -metadata: - name: overrides - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: rollout-operator-role - namespace: default -rules: -- apiGroups: - - "" - resources: - - pods - verbs: - - list - - get - - watch - - delete -- apiGroups: - - apps - resources: - - statefulsets - verbs: - - list - - get - - watch -- apiGroups: - - apps - resources: - - statefulsets/status - verbs: - - update ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: rollout-operator-rolebinding - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: rollout-operator-role -subjects: -- kind: ServiceAccount - name: rollout-operator - namespace: default ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - clusterIP: None - ports: - - name: 
alertmanager-http-metrics - port: 8080 - targetPort: 8080 - - name: alertmanager-grpc - port: 9095 - targetPort: 9095 - - name: alertmanager-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: alertmanager ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - clusterIP: None - ports: - - name: compactor-http-metrics - port: 8080 - targetPort: 8080 - - name: compactor-grpc - port: 9095 - targetPort: 9095 - - name: compactor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: compactor ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: distributor - name: distributor - namespace: default -spec: - clusterIP: None - ports: - - name: distributor-http-metrics - port: 8080 - targetPort: 8080 - - name: distributor-grpc - port: 9095 - targetPort: 9095 - - name: distributor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: distributor ---- -apiVersion: v1 -kind: Service -metadata: - name: gossip-ring - namespace: default -spec: - clusterIP: None - ports: - - name: gossip-ring - port: 7946 - protocol: TCP - targetPort: 7946 - selector: - gossip_ring_member: "true" ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ingester-zone-a - name: ingester-zone-a - namespace: default -spec: - clusterIP: None - ports: - - name: ingester-http-metrics - port: 8080 - targetPort: 8080 - - name: ingester-grpc - port: 9095 - targetPort: 9095 - - name: ingester-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: ingester-zone-a - rollout-group: ingester ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ingester-zone-b - name: ingester-zone-b - namespace: default -spec: - clusterIP: None - ports: - - name: ingester-http-metrics - port: 8080 - targetPort: 8080 - - name: ingester-grpc - port: 9095 - targetPort: 9095 - - name: ingester-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: ingester-zone-b - rollout-group: ingester ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ingester-zone-c - name: ingester-zone-c - namespace: default -spec: - clusterIP: None - ports: - - name: ingester-http-metrics - port: 8080 - targetPort: 8080 - - name: ingester-grpc - port: 9095 - targetPort: 9095 - - name: ingester-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: ingester-zone-c - rollout-group: ingester ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached - name: memcached - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-frontend - name: memcached-frontend - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-index-queries - name: memcached-index-queries - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-index-queries ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-metadata - name: memcached-metadata - namespace: default -spec: - clusterIP: None - ports: - - 
name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-metadata ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: querier - name: querier - namespace: default -spec: - ports: - - name: querier-http-metrics - port: 8080 - targetPort: 8080 - - name: querier-grpc - port: 9095 - targetPort: 9095 - - name: querier-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: querier ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend - namespace: default -spec: - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler - namespace: default -spec: - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ruler - name: ruler - namespace: default -spec: - ports: - - name: ruler-http-metrics - port: 8080 - targetPort: 8080 - - name: ruler-grpc - port: 9095 - targetPort: 9095 - selector: - name: ruler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway-multi-zone - name: store-gateway-multi-zone - namespace: default -spec: - ports: - - name: store-gateway-http-metrics - port: 80 - protocol: TCP - targetPort: 80 - selector: - rollout-group: store-gateway ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway-zone-a - name: store-gateway-zone-a - namespace: default -spec: - clusterIP: None - ports: - - name: store-gateway-http-metrics - port: 8080 - targetPort: 8080 - - name: store-gateway-grpc - port: 9095 - targetPort: 9095 - - name: store-gateway-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: store-gateway-zone-a - rollout-group: store-gateway ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway-zone-b - name: store-gateway-zone-b - namespace: default -spec: - clusterIP: None - ports: - - name: store-gateway-http-metrics - port: 8080 - targetPort: 8080 - - name: store-gateway-grpc - port: 9095 - targetPort: 9095 - - name: store-gateway-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: store-gateway-zone-b - rollout-group: store-gateway ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway-zone-c - name: store-gateway-zone-c - namespace: default -spec: - clusterIP: None - ports: - - name: store-gateway-http-metrics - port: 8080 - targetPort: 8080 - - name: store-gateway-grpc - 
port: 9095 - targetPort: 9095 - - name: store-gateway-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: store-gateway-zone-c - rollout-group: store-gateway ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: distributor - namespace: default -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: distributor - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: distributor - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: distributor - topologyKey: kubernetes.io/hostname - containers: - - args: - - -distributor.ha-tracker.enable=true - - -distributor.ha-tracker.enable-for-all-users=true - - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 - - -distributor.ha-tracker.prefix=prom_ha/ - - -distributor.ha-tracker.store=etcd - - -distributor.health-check-ingesters=true - - -distributor.ingestion-burst-size=200000 - - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.prefix= - - -distributor.ring.store=memberlist - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.zone-awareness-enabled=true - - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.max-connection-age=2m - - -server.grpc.keepalive.max-connection-age-grace=5m - - -server.grpc.keepalive.max-connection-idle=1m - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=distributor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: distributor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 4Gi - requests: - cpu: "2" - memory: 2Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: querier - namespace: default -spec: - minReadySeconds: 10 - replicas: 6 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: querier - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: querier - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: querier - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - 
- -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.zone-awareness-enabled=true - - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -querier.frontend-client.grpc-max-send-msg-size=104857600 - - -querier.max-concurrent=8 - - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.prefix=multi-zone/ - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.zone-awareness-enabled=true - - -store.max-query-length=768h - - -target=querier - env: - - name: JAEGER_REPORTER_MAX_QUEUE_SIZE - value: "1024" - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: querier - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 24Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-frontend - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-frontend - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-frontend.align-querier-with-step=false - - -query-frontend.cache-results=true - - -query-frontend.max-cache-freshness=10m - - -query-frontend.results-cache.backend=memcached - - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 - - -query-frontend.results-cache.memcached.timeout=500ms - - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store.max-query-length=12000h - - -target=query-frontend - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-frontend - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 1200Mi - requests: - cpu: "2" - memory: 600Mi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-scheduler - namespace: default -spec: - 
minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-scheduler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-scheduler - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-scheduler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-scheduler.max-outstanding-requests-per-tenant=100 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=query-scheduler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-scheduler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 2Gi - requests: - cpu: "2" - memory: 1Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: rollout-operator - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: rollout-operator - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - name: rollout-operator - spec: - containers: - - args: - - -kubernetes.namespace=default - image: grafana/rollout-operator:v0.1.1 - imagePullPolicy: IfNotPresent - name: rollout-operator - ports: - - containerPort: 8001 - name: http-metrics - readinessProbe: - httpGet: - path: /ready - port: 8001 - initialDelaySeconds: 5 - timeoutSeconds: 1 - resources: - limits: - cpu: "1" - memory: 200Mi - requests: - cpu: 100m - memory: 100Mi - serviceAccountName: rollout-operator ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: ruler - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: ruler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: ruler - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: ruler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -ruler-storage.backend=gcs - - -ruler-storage.gcs.bucket-name=rules-bucket - - 
-ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - - -ruler.max-rule-groups-per-tenant=35 - - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.store=memberlist - - -ruler.rule-path=/rules - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.prefix=multi-zone/ - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.zone-awareness-enabled=true - - -store.max-query-length=768h - - -target=ruler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ruler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - cpu: "16" - memory: 16Gi - requests: - cpu: "1" - memory: 6Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - terminationGracePeriodSeconds: 600 - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: alertmanager - serviceName: alertmanager - template: - metadata: - labels: - gossip_ring_member: "true" - name: alertmanager - spec: - containers: - - args: - - -alertmanager-storage.backend=gcs - - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=memberlist - - -alertmanager.storage.path=/data - - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=alertmanager - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: alertmanager - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 15Gi - requests: - cpu: "2" - memory: 10Gi - volumeMounts: - - mountPath: /data - name: alertmanager-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: alertmanager-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: compactor - serviceName: compactor - template: - metadata: - labels: - gossip_ring_member: "true" - name: compactor - spec: - containers: - - args: - - -blocks-storage.backend=gcs - - 
-blocks-storage.gcs.bucket-name=blocks-bucket - - -compactor.block-ranges=2h,12h,24h - - -compactor.blocks-retention-period=0 - - -compactor.cleanup-interval=15m - - -compactor.compaction-concurrency=1 - - -compactor.compaction-interval=30m - - -compactor.compactor-tenant-shard-size=1 - - -compactor.data-dir=/data - - -compactor.deletion-delay=2h - - -compactor.max-closing-blocks-concurrency=2 - - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.prefix= - - -compactor.ring.store=memberlist - - -compactor.ring.wait-stability-min-duration=1m - - -compactor.split-and-merge-shards=0 - - -compactor.split-groups=1 - - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=compactor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: compactor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 6Gi - requests: - cpu: 1 - memory: 6Gi - volumeMounts: - - mountPath: /data - name: compactor-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: compactor-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 250Gi - storageClassName: standard ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - rollout-max-unavailable: "10" - labels: - rollout-group: ingester - name: ingester-zone-a - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: ingester-zone-a - rollout-group: ingester - serviceName: ingester-zone-a - template: - metadata: - labels: - gossip_ring_member: "true" - name: ingester-zone-a - rollout-group: ingester - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: rollout-group - operator: In - values: - - ingester - - key: name - operator: NotIn - values: - - ingester-zone-a - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -blocks-storage.tsdb.block-ranges-period=2h - - -blocks-storage.tsdb.dir=/data/tsdb - - -blocks-storage.tsdb.ship-interval=1m - - -distributor.health-check-ingesters=true - - -ingester.max-global-series-per-metric=20000 - - -ingester.max-global-series-per-user=150000 - - -ingester.ring.heartbeat-period=15s - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.instance-availability-zone=zone-a - - -ingester.ring.num-tokens=512 - - -ingester.ring.prefix= - - -ingester.ring.readiness-check-ring-health=false - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.tokens-file-path=/data/tokens - - -ingester.ring.unregister-on-shutdown=true - - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false 
- - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc-max-concurrent-streams=10000 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=ingester - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ingester - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 25Gi - requests: - cpu: "4" - memory: 15Gi - volumeMounts: - - mountPath: /data - name: ingester-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 1200 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: OnDelete - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ingester-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi - storageClassName: fast ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - rollout-max-unavailable: "10" - labels: - rollout-group: ingester - name: ingester-zone-b - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: ingester-zone-b - rollout-group: ingester - serviceName: ingester-zone-b - template: - metadata: - labels: - gossip_ring_member: "true" - name: ingester-zone-b - rollout-group: ingester - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: rollout-group - operator: In - values: - - ingester - - key: name - operator: NotIn - values: - - ingester-zone-b - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -blocks-storage.tsdb.block-ranges-period=2h - - -blocks-storage.tsdb.dir=/data/tsdb - - -blocks-storage.tsdb.ship-interval=1m - - -distributor.health-check-ingesters=true - - -ingester.max-global-series-per-metric=20000 - - -ingester.max-global-series-per-user=150000 - - -ingester.ring.heartbeat-period=15s - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.instance-availability-zone=zone-b - - -ingester.ring.num-tokens=512 - - -ingester.ring.prefix= - - -ingester.ring.readiness-check-ring-health=false - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.tokens-file-path=/data/tokens - - -ingester.ring.unregister-on-shutdown=true - - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc-max-concurrent-streams=10000 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=ingester - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ingester - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - 
resources: - limits: - memory: 25Gi - requests: - cpu: "4" - memory: 15Gi - volumeMounts: - - mountPath: /data - name: ingester-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 1200 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: OnDelete - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ingester-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi - storageClassName: fast ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - rollout-max-unavailable: "10" - labels: - rollout-group: ingester - name: ingester-zone-c - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: ingester-zone-c - rollout-group: ingester - serviceName: ingester-zone-c - template: - metadata: - labels: - gossip_ring_member: "true" - name: ingester-zone-c - rollout-group: ingester - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: rollout-group - operator: In - values: - - ingester - - key: name - operator: NotIn - values: - - ingester-zone-c - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -blocks-storage.tsdb.block-ranges-period=2h - - -blocks-storage.tsdb.dir=/data/tsdb - - -blocks-storage.tsdb.ship-interval=1m - - -distributor.health-check-ingesters=true - - -ingester.max-global-series-per-metric=20000 - - -ingester.max-global-series-per-user=150000 - - -ingester.ring.heartbeat-period=15s - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.instance-availability-zone=zone-c - - -ingester.ring.num-tokens=512 - - -ingester.ring.prefix= - - -ingester.ring.readiness-check-ring-health=false - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.tokens-file-path=/data/tokens - - -ingester.ring.unregister-on-shutdown=true - - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc-max-concurrent-streams=10000 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=ingester - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ingester - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 25Gi - requests: - cpu: "4" - memory: 15Gi - volumeMounts: - - mountPath: /data - name: ingester-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 1200 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: OnDelete - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ingester-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi - storageClassName: fast ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached - namespace: default -spec: - replicas: 3 - 
selector: - matchLabels: - name: memcached - serviceName: memcached - template: - metadata: - labels: - name: memcached - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 6144 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 9Gi - requests: - cpu: 500m - memory: 6552Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-frontend - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-frontend - serviceName: memcached-frontend - template: - metadata: - labels: - name: memcached-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 1024 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-index-queries - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-index-queries - serviceName: memcached-index-queries - template: - metadata: - labels: - name: memcached-index-queries - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-index-queries - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-metadata - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - name: memcached-metadata - serviceName: memcached-metadata - template: - metadata: - labels: - name: memcached-metadata - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-metadata - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 512 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - 
resources: - limits: - memory: 768Mi - requests: - cpu: 500m - memory: 715Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - rollout-max-unavailable: "10" - labels: - rollout-group: store-gateway - name: store-gateway-zone-a - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: store-gateway-zone-a - rollout-group: store-gateway - serviceName: store-gateway-zone-a - template: - metadata: - labels: - gossip_ring_member: "true" - name: store-gateway-zone-a - rollout-group: store-gateway - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: rollout-group - operator: In - values: - - store-gateway - - key: name - operator: NotIn - values: - - store-gateway-zone-a - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.chunks-cache.backend=memcached - - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms - - -blocks-storage.bucket-store.index-cache.backend=memcached - - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 - - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true - - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m - - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - 
-store-gateway.sharding-ring.instance-availability-zone=zone-a - - -store-gateway.sharding-ring.prefix=multi-zone/ - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - - -store-gateway.sharding-ring.unregister-on-shutdown=false - - -store-gateway.sharding-ring.wait-stability-min-duration=1m - - -store-gateway.sharding-ring.zone-awareness-enabled=true - - -target=store-gateway - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: store-gateway - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 18Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /data - name: store-gateway-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 120 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: OnDelete - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: store-gateway-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi - storageClassName: standard ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - rollout-max-unavailable: "10" - labels: - rollout-group: store-gateway - name: store-gateway-zone-b - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: store-gateway-zone-b - rollout-group: store-gateway - serviceName: store-gateway-zone-b - template: - metadata: - labels: - gossip_ring_member: "true" - name: store-gateway-zone-b - rollout-group: store-gateway - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: rollout-group - operator: In - values: - - store-gateway - - key: name - operator: NotIn - values: - - store-gateway-zone-b - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.chunks-cache.backend=memcached - - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms - - -blocks-storage.bucket-store.index-cache.backend=memcached - - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 - - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true - - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m - - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 - - 
-blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.instance-availability-zone=zone-b - - -store-gateway.sharding-ring.prefix=multi-zone/ - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - - -store-gateway.sharding-ring.unregister-on-shutdown=false - - -store-gateway.sharding-ring.wait-stability-min-duration=1m - - -store-gateway.sharding-ring.zone-awareness-enabled=true - - -target=store-gateway - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: store-gateway - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 18Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /data - name: store-gateway-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 120 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: OnDelete - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: store-gateway-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi - storageClassName: standard ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - annotations: - rollout-max-unavailable: "10" - labels: - rollout-group: store-gateway - name: store-gateway-zone-c - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: store-gateway-zone-c - rollout-group: store-gateway - serviceName: store-gateway-zone-c - template: - metadata: - labels: - gossip_ring_member: "true" - name: store-gateway-zone-c - rollout-group: store-gateway - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchExpressions: - - key: rollout-group - operator: In - values: - - store-gateway - - key: name - operator: NotIn - values: - - store-gateway-zone-c - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.chunks-cache.backend=memcached - - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 - - 
-blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms - - -blocks-storage.bucket-store.index-cache.backend=memcached - - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 - - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true - - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m - - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.instance-availability-zone=zone-c - - -store-gateway.sharding-ring.prefix=multi-zone/ - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - - -store-gateway.sharding-ring.unregister-on-shutdown=false - - -store-gateway.sharding-ring.wait-stability-min-duration=1m - - -store-gateway.sharding-ring.zone-awareness-enabled=true - - -target=store-gateway - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: store-gateway - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 18Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /data - name: store-gateway-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 120 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: OnDelete - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: store-gateway-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi - storageClassName: standard ---- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - annotations: - 
etcd.database.coreos.com/scope: clusterwide - name: etcd - namespace: default -spec: - pod: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - etcd_cluster: etcd - topologyKey: kubernetes.io/hostname - annotations: - prometheus.io/port: "2379" - prometheus.io/scrape: "true" - etcdEnv: - - name: ETCD_AUTO_COMPACTION_RETENTION - value: 1h - labels: - name: etcd - resources: - limits: - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - size: 3 - version: 3.3.13 diff --git a/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml b/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml deleted file mode 100644 index 71782ca8bb..0000000000 --- a/operations/mimir-tests/test-gossip-ruler-disabled-generated.yaml +++ /dev/null @@ -1,1315 +0,0 @@ -apiVersion: v1 -kind: Namespace -metadata: - name: default ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: alertmanager-pdb - name: alertmanager-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: alertmanager ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: ingester-pdb - name: ingester-pdb - namespace: default -spec: - maxUnavailable: 1 - selector: - matchLabels: - name: ingester ---- -apiVersion: policy/v1beta1 -kind: PodDisruptionBudget -metadata: - labels: - name: store-gateway-pdb - name: store-gateway-pdb - namespace: default -spec: - maxUnavailable: 2 - selector: - matchLabels: - name: store-gateway ---- -apiVersion: v1 -data: - overrides.yaml: | - overrides: {} -kind: ConfigMap -metadata: - name: overrides - namespace: default ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - clusterIP: None - ports: - - name: alertmanager-http-metrics - port: 8080 - targetPort: 8080 - - name: alertmanager-grpc - port: 9095 - targetPort: 9095 - - name: alertmanager-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: alertmanager ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - clusterIP: None - ports: - - name: compactor-http-metrics - port: 8080 - targetPort: 8080 - - name: compactor-grpc - port: 9095 - targetPort: 9095 - - name: compactor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: compactor ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: distributor - name: distributor - namespace: default -spec: - clusterIP: None - ports: - - name: distributor-http-metrics - port: 8080 - targetPort: 8080 - - name: distributor-grpc - port: 9095 - targetPort: 9095 - - name: distributor-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: distributor ---- -apiVersion: v1 -kind: Service -metadata: - name: gossip-ring - namespace: default -spec: - clusterIP: None - ports: - - name: gossip-ring - port: 7946 - protocol: TCP - targetPort: 7946 - selector: - gossip_ring_member: "true" ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: ingester - name: ingester - namespace: default -spec: - ports: - - name: ingester-http-metrics - port: 8080 - targetPort: 8080 - - name: ingester-grpc - port: 9095 - targetPort: 9095 - - name: ingester-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: ingester ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached - name: memcached - namespace: default -spec: - clusterIP: None - ports: - - name: 
memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-frontend - name: memcached-frontend - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-index-queries - name: memcached-index-queries - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-index-queries ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: memcached-metadata - name: memcached-metadata - namespace: default -spec: - clusterIP: None - ports: - - name: memcached-client - port: 11211 - targetPort: 11211 - - name: exporter-http-metrics - port: 9150 - targetPort: 9150 - selector: - name: memcached-metadata ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: querier - name: querier - namespace: default -spec: - ports: - - name: querier-http-metrics - port: 8080 - targetPort: 8080 - - name: querier-grpc - port: 9095 - targetPort: 9095 - - name: querier-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: querier ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend - namespace: default -spec: - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-frontend - name: query-frontend-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-frontend-http-metrics - port: 8080 - targetPort: 8080 - - name: query-frontend-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-frontend ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler - namespace: default -spec: - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: query-scheduler - name: query-scheduler-discovery - namespace: default -spec: - clusterIP: None - ports: - - name: query-scheduler-http-metrics - port: 8080 - targetPort: 8080 - - name: query-scheduler-grpc - port: 9095 - targetPort: 9095 - publishNotReadyAddresses: true - selector: - name: query-scheduler ---- -apiVersion: v1 -kind: Service -metadata: - labels: - name: store-gateway - name: store-gateway - namespace: default -spec: - ports: - - name: store-gateway-http-metrics - port: 8080 - targetPort: 8080 - - name: store-gateway-grpc - port: 9095 - targetPort: 9095 - - name: store-gateway-gossip-ring - port: 7946 - targetPort: 7946 - selector: - name: store-gateway ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: distributor - namespace: default -spec: - minReadySeconds: 10 - replicas: 3 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: distributor - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - 
name: distributor - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: distributor - topologyKey: kubernetes.io/hostname - containers: - - args: - - -distributor.ha-tracker.enable=true - - -distributor.ha-tracker.enable-for-all-users=true - - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 - - -distributor.ha-tracker.prefix=prom_ha/ - - -distributor.ha-tracker.store=etcd - - -distributor.health-check-ingesters=true - - -distributor.ingestion-burst-size=200000 - - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.prefix= - - -distributor.ring.store=memberlist - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.max-connection-age=2m - - -server.grpc.keepalive.max-connection-age-grace=5m - - -server.grpc.keepalive.max-connection-idle=1m - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=distributor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: distributor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 4Gi - requests: - cpu: "2" - memory: 2Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: querier - namespace: default -spec: - minReadySeconds: 10 - replicas: 6 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: querier - strategy: - rollingUpdate: - maxSurge: 5 - maxUnavailable: 1 - template: - metadata: - labels: - gossip_ring_member: "true" - name: querier - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: querier - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -distributor.health-check-ingesters=true - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.prefix= - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -querier.frontend-client.grpc-max-send-msg-size=104857600 - - -querier.max-concurrent=8 - - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - 
-runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store.max-query-length=768h - - -target=querier - env: - - name: JAEGER_REPORTER_MAX_QUEUE_SIZE - value: "1024" - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: querier - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 24Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-frontend - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-frontend - strategy: - rollingUpdate: - maxSurge: 1 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-frontend.align-querier-with-step=false - - -query-frontend.cache-results=true - - -query-frontend.max-cache-freshness=10m - - -query-frontend.results-cache.backend=memcached - - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 - - -query-frontend.results-cache.memcached.timeout=500ms - - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -server.http-write-timeout=1m - - -store.max-query-length=12000h - - -target=query-frontend - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-frontend - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 1200Mi - requests: - cpu: "2" - memory: 600Mi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: Deployment -metadata: - name: query-scheduler - namespace: default -spec: - minReadySeconds: 10 - replicas: 2 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: query-scheduler - strategy: - rollingUpdate: - maxSurge: 0 - maxUnavailable: 1 - template: - metadata: - labels: - name: query-scheduler - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: query-scheduler - topologyKey: kubernetes.io/hostname - containers: - - args: - - -query-scheduler.max-outstanding-requests-per-tenant=100 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - 
-target=query-scheduler - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: query-scheduler - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 2Gi - requests: - cpu: "2" - memory: 1Gi - volumeMounts: - - mountPath: /etc/mimir - name: overrides - volumes: - - configMap: - name: overrides - name: overrides ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: alertmanager - name: alertmanager - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: alertmanager - serviceName: alertmanager - template: - metadata: - labels: - gossip_ring_member: "true" - name: alertmanager - spec: - containers: - - args: - - -alertmanager-storage.backend=gcs - - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=memberlist - - -alertmanager.storage.path=/data - - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=alertmanager - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: alertmanager - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 15Gi - requests: - cpu: "2" - memory: 10Gi - volumeMounts: - - mountPath: /data - name: alertmanager-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: alertmanager-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: compactor - name: compactor - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 1 - selector: - matchLabels: - name: compactor - serviceName: compactor - template: - metadata: - labels: - gossip_ring_member: "true" - name: compactor - spec: - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -compactor.block-ranges=2h,12h,24h - - -compactor.blocks-retention-period=0 - - -compactor.cleanup-interval=15m - - -compactor.compaction-concurrency=1 - - -compactor.compaction-interval=30m - - -compactor.compactor-tenant-shard-size=1 - - -compactor.data-dir=/data - - -compactor.deletion-delay=2h - - -compactor.max-closing-blocks-concurrency=2 - - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.prefix= - - -compactor.ring.store=memberlist - - -compactor.ring.wait-stability-min-duration=1m - - -compactor.split-and-merge-shards=0 - - -compactor.split-groups=1 - - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false 
- - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=compactor - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: compactor - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 6Gi - requests: - cpu: 1 - memory: 6Gi - volumeMounts: - - mountPath: /data - name: compactor-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 900 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: compactor-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 250Gi - storageClassName: standard ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: ingester - name: ingester - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 3 - selector: - matchLabels: - name: ingester - serviceName: ingester - template: - metadata: - labels: - gossip_ring_member: "true" - name: ingester - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: ingester - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -blocks-storage.tsdb.block-ranges-period=2h - - -blocks-storage.tsdb.dir=/data/tsdb - - -blocks-storage.tsdb.ship-interval=1m - - -distributor.health-check-ingesters=true - - -ingester.max-global-series-per-metric=20000 - - -ingester.max-global-series-per-user=150000 - - -ingester.ring.heartbeat-period=15s - - -ingester.ring.heartbeat-timeout=10m - - -ingester.ring.num-tokens=512 - - -ingester.ring.prefix= - - -ingester.ring.readiness-check-ring-health=false - - -ingester.ring.replication-factor=3 - - -ingester.ring.store=memberlist - - -ingester.ring.tokens-file-path=/data/tokens - - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc-max-concurrent-streams=10000 - - -server.grpc.keepalive.min-time-between-pings=10s - - -server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -target=ingester - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: ingester - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 25Gi - requests: - cpu: "4" - memory: 15Gi - volumeMounts: - - mountPath: /data - name: ingester-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 1200 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - 
apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: ingester-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 100Gi - storageClassName: fast ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached - serviceName: memcached - template: - metadata: - labels: - name: memcached - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 6144 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 9Gi - requests: - cpu: 500m - memory: 6552Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-frontend - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-frontend - serviceName: memcached-frontend - template: - metadata: - labels: - name: memcached-frontend - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-frontend - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 1024 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-index-queries - namespace: default -spec: - replicas: 3 - selector: - matchLabels: - name: memcached-index-queries - serviceName: memcached-index-queries - template: - metadata: - labels: - name: memcached-index-queries - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-index-queries - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 1024 - - -I 5m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 1536Mi - requests: - cpu: 500m - memory: 1329Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: memcached-metadata - namespace: default -spec: - replicas: 1 - selector: - matchLabels: - name: memcached-metadata - serviceName: memcached-metadata - template: - metadata: - labels: - name: memcached-metadata - spec: - affinity: - podAntiAffinity: - 
requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: memcached-metadata - topologyKey: kubernetes.io/hostname - containers: - - args: - - -m 512 - - -I 1m - - -c 16384 - - -v - image: memcached:1.6.9-alpine - imagePullPolicy: IfNotPresent - name: memcached - ports: - - containerPort: 11211 - name: client - resources: - limits: - memory: 768Mi - requests: - cpu: 500m - memory: 715Mi - - args: - - --memcached.address=localhost:11211 - - --web.listen-address=0.0.0.0:9150 - image: prom/memcached-exporter:v0.6.0 - imagePullPolicy: IfNotPresent - name: exporter - ports: - - containerPort: 9150 - name: http-metrics - updateStrategy: - type: RollingUpdate ---- -apiVersion: apps/v1 -kind: StatefulSet -metadata: - labels: - name: store-gateway - name: store-gateway - namespace: default -spec: - podManagementPolicy: Parallel - replicas: 3 - selector: - matchLabels: - name: store-gateway - serviceName: store-gateway - template: - metadata: - labels: - gossip_ring_member: "true" - name: store-gateway - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: store-gateway - topologyKey: kubernetes.io/hostname - containers: - - args: - - -blocks-storage.backend=gcs - - -blocks-storage.bucket-store.chunks-cache.backend=memcached - - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms - - -blocks-storage.bucket-store.index-cache.backend=memcached - - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 - - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true - - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m - - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 - - -blocks-storage.bucket-store.metadata-cache.backend=memcached - - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 - - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - - -blocks-storage.bucket-store.sync-dir=/data/tsdb - - -blocks-storage.bucket-store.sync-interval=15m - - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - - -memberlist.bind-port=7946 - - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - - -runtime-config.file=/etc/mimir/overrides.yaml - - -server.grpc.keepalive.min-time-between-pings=10s - - 
-server.grpc.keepalive.ping-without-stream-allowed=true - - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.prefix= - - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=memberlist - - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - - -store-gateway.sharding-ring.wait-stability-min-duration=1m - - -target=store-gateway - image: grafana/mimir:2.1.0 - imagePullPolicy: IfNotPresent - name: store-gateway - ports: - - containerPort: 8080 - name: http-metrics - - containerPort: 9095 - name: grpc - - containerPort: 7946 - name: gossip-ring - readinessProbe: - httpGet: - path: /ready - port: 8080 - initialDelaySeconds: 15 - timeoutSeconds: 1 - resources: - limits: - memory: 18Gi - requests: - cpu: "1" - memory: 12Gi - volumeMounts: - - mountPath: /data - name: store-gateway-data - - mountPath: /etc/mimir - name: overrides - securityContext: - runAsUser: 0 - terminationGracePeriodSeconds: 120 - volumes: - - configMap: - name: overrides - name: overrides - updateStrategy: - type: RollingUpdate - volumeClaimTemplates: - - apiVersion: v1 - kind: PersistentVolumeClaim - metadata: - name: store-gateway-data - spec: - accessModes: - - ReadWriteOnce - resources: - requests: - storage: 50Gi - storageClassName: standard ---- -apiVersion: etcd.database.coreos.com/v1beta2 -kind: EtcdCluster -metadata: - annotations: - etcd.database.coreos.com/scope: clusterwide - name: etcd - namespace: default -spec: - pod: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - etcd_cluster: etcd - topologyKey: kubernetes.io/hostname - annotations: - prometheus.io/port: "2379" - prometheus.io/scrape: "true" - etcdEnv: - - name: ETCD_AUTO_COMPACTION_RETENTION - value: 1h - labels: - name: etcd - resources: - limits: - memory: 512Mi - requests: - cpu: 500m - memory: 512Mi - size: 3 - version: 3.3.13 diff --git a/operations/mimir-tests/test-multi-zone-generated.yaml b/operations/mimir-tests/test-multi-zone-generated.yaml index 378afdf269..90b24fa612 100644 --- a/operations/mimir-tests/test-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-generated.yaml @@ -44,230 +44,11 @@ spec: --- apiVersion: v1 kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -kind: ServiceAccount metadata: name: rollout-operator namespace: default --- apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - 
name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: 
consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -278,24 +59,6 @@ metadata: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role metadata: name: rollout-operator-role namespace: default @@ -326,20 +89,6 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding metadata: name: rollout-operator-rolebinding namespace: default @@ -368,6 +117,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -387,41 +139,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -436,11 +161,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + 
port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester-zone-a @@ -455,6 +198,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester-zone-a rollout-group: ingester @@ -475,6 +221,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester-zone-b rollout-group: ingester @@ -495,6 +244,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester-zone-c rollout-group: ingester @@ -590,6 +342,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -719,6 +474,9 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway-zone-a rollout-group: store-gateway @@ -739,6 +497,9 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway-zone-b rollout-group: store-gateway @@ -759,137 +520,15 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway-zone-c rollout-group: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - 
- --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -907,6 +546,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -926,16 +566,17 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -952,6 +593,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -991,6 +634,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -1011,13 +655,15 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -1026,10 +672,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.zone-awareness-enabled=true - -store.max-query-length=768h - -target=querier @@ -1044,6 +689,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1263,6 +910,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler 
spec: affinity: @@ -1283,28 +931,28 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.zone-awareness-enabled=true - -store.max-query-length=768h - -target=ruler @@ -1354,17 +1002,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1383,6 +1034,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1438,6 +1091,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1454,13 +1108,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1474,6 +1130,8 @@ spec: name: http-metrics - containerPort: 
9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1533,6 +1191,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester-zone-a rollout-group: ingester spec: @@ -1560,7 +1219,6 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.instance-availability-zone=zone-a @@ -1568,10 +1226,13 @@ spec: - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1586,6 +1247,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1645,6 +1308,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester-zone-b rollout-group: ingester spec: @@ -1672,7 +1336,6 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.instance-availability-zone=zone-b @@ -1680,10 +1343,13 @@ spec: - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1698,6 +1364,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1757,6 +1425,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester-zone-c rollout-group: ingester spec: @@ -1784,7 +1453,6 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.instance-availability-zone=zone-c @@ -1792,10 +1460,13 @@ spec: - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true + - 
-memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1810,6 +1481,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2081,6 +1754,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway-zone-a rollout-group: store-gateway spec: @@ -2126,15 +1800,17 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.instance-availability-zone=zone-a - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.unregister-on-shutdown=false - -store-gateway.sharding-ring.wait-stability-min-duration=1m @@ -2148,6 +1824,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2207,6 +1885,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway-zone-b rollout-group: store-gateway spec: @@ -2252,15 +1931,17 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.instance-availability-zone=zone-b - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.unregister-on-shutdown=false - -store-gateway.sharding-ring.wait-stability-min-duration=1m @@ -2274,6 +1955,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2333,6 +2016,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway-zone-c rollout-group: store-gateway spec: @@ -2378,15 +2062,17 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.instance-availability-zone=zone-c - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.unregister-on-shutdown=false - -store-gateway.sharding-ring.wait-stability-min-duration=1m @@ -2400,6 +2086,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml index b90dbede3a..86b39b2e87 100644 --- a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml @@ -70,230 +70,11 @@ spec: --- apiVersion: v1 kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -kind: ServiceAccount metadata: name: rollout-operator namespace: default --- apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: 
consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register 
- labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -304,24 +85,6 @@ metadata: --- apiVersion: rbac.authorization.k8s.io/v1 kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role metadata: name: rollout-operator-role namespace: default @@ -352,20 +115,6 @@ rules: --- apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding metadata: name: rollout-operator-rolebinding namespace: default @@ -394,6 +143,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -413,41 +165,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -462,11 +187,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -480,6 +223,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -499,6 +245,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester-zone-a rollout-group: ingester @@ -519,6 +268,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester-zone-b rollout-group: ingester @@ -539,6 +291,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester-zone-c 
rollout-group: ingester @@ -634,6 +389,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -746,6 +504,9 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- @@ -781,6 +542,9 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway-zone-a rollout-group: store-gateway @@ -801,6 +565,9 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway-zone-b rollout-group: store-gateway @@ -821,137 +588,15 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway-zone-c rollout-group: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- 
-apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -969,6 +614,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -988,16 +634,17 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -1014,6 +661,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1053,6 +702,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -1073,13 +723,15 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -1088,10 +740,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.zone-awareness-enabled=true - -store.max-query-length=768h - -target=querier @@ -1106,6 +757,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1325,6 +978,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1345,28 +999,28 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - 
-memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.zone-awareness-enabled=true - -store.max-query-length=768h - -target=ruler @@ -1416,17 +1070,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1445,6 +1102,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1500,6 +1159,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1516,13 +1176,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1536,6 +1198,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1592,6 +1256,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1611,16 +1276,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - 
-ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1635,6 +1302,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1694,6 +1363,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester-zone-a rollout-group: ingester spec: @@ -1721,7 +1391,6 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.instance-availability-zone=zone-a @@ -1729,10 +1398,13 @@ spec: - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1747,6 +1419,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1806,6 +1480,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester-zone-b rollout-group: ingester spec: @@ -1833,7 +1508,6 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.instance-availability-zone=zone-b @@ -1841,10 +1515,13 @@ spec: - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1859,6 +1536,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1918,6 +1597,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester-zone-c rollout-group: ingester spec: @@ -1945,7 +1625,6 @@ spec: - 
-distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.instance-availability-zone=zone-c @@ -1953,10 +1632,13 @@ spec: - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1971,6 +1653,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2239,6 +1923,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -2276,14 +1961,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -2295,6 +1982,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2354,6 +2043,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway-zone-a rollout-group: store-gateway spec: @@ -2399,15 +2089,17 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.instance-availability-zone=zone-a - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.unregister-on-shutdown=false - -store-gateway.sharding-ring.wait-stability-min-duration=1m @@ -2421,6 +2113,8 @@ spec: 
name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2480,6 +2174,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway-zone-b rollout-group: store-gateway spec: @@ -2525,15 +2220,17 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.instance-availability-zone=zone-b - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.unregister-on-shutdown=false - -store-gateway.sharding-ring.wait-stability-min-duration=1m @@ -2547,6 +2244,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -2606,6 +2305,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway-zone-c rollout-group: store-gateway spec: @@ -2651,15 +2351,17 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.instance-availability-zone=zone-c - -store-gateway.sharding-ring.prefix=multi-zone/ - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.unregister-on-shutdown=false - -store-gateway.sharding-ring.wait-stability-min-duration=1m @@ -2673,6 +2375,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-query-sharding-generated.yaml b/operations/mimir-tests/test-query-sharding-generated.yaml index 819171e5f2..cfd3fa5f4b 100644 --- a/operations/mimir-tests/test-query-sharding-generated.yaml +++ b/operations/mimir-tests/test-query-sharding-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - 
type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: 
consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - 
name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: 
consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -776,15 +404,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -801,6 +430,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -840,6 +471,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -860,12 +492,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=419430400 - -querier.max-concurrent=16 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -874,10 +508,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -891,6 +524,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1070,6 +705,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1090,27 +726,27 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - 
-ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1159,17 +795,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1188,6 +827,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1243,6 +884,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1259,13 +901,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1279,6 +923,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1335,6 +981,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1354,16 +1001,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - 
-ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1378,6 +1027,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1646,6 +1297,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1683,14 +1335,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1702,6 +1356,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml index 7ec949a8eb..94e2276f6f 100644 --- a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml +++ b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - 
query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: 
consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: 
v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,6 +359,9 @@ spec: - name: ruler-querier-grpc port: 9095 targetPort: 9095 + - name: ruler-querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ruler-querier --- @@ -685,136 +437,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -832,6 +462,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -851,15 +482,16 @@ spec: - -distributor.health-check-ingesters=true - 
-distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -876,6 +508,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -915,6 +549,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -935,12 +570,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -949,10 +586,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -966,6 +602,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1140,6 +778,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1160,28 +799,28 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - -ruler.query-frontend.address=dns:///ruler-query-frontend.default.svc.cluster.local:9095 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - 
- -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1253,12 +892,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=ruler-query-scheduler-discovery.default.svc.cluster.local:9095 @@ -1267,10 +908,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -1284,6 +924,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1452,17 +1094,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1481,6 +1126,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1536,6 +1183,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1552,13 +1200,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - 
-compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1572,6 +1222,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1628,6 +1280,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1647,16 +1300,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1671,6 +1326,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1939,6 +1596,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1976,14 +1634,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1995,6 +1655,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml index f8eabe1838..1fff2c5d54 100644 --- a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml +++ b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, 
"raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: 
consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service 
-metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,6 +359,9 @@ spec: - name: ruler-querier-grpc port: 9095 targetPort: 9095 + - name: ruler-querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ruler-querier --- @@ -685,136 +437,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - 
ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -832,6 +462,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -851,15 +482,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -876,6 +508,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -915,6 +549,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -935,12 +570,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -949,10 +586,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -966,6 +602,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1140,6 +778,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: 
ruler spec: affinity: @@ -1160,27 +799,27 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1252,12 +891,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=ruler-query-scheduler-discovery.default.svc.cluster.local:9095 @@ -1266,10 +907,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -1283,6 +923,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1451,17 +1093,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1480,6 +1125,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1535,6 +1182,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1551,13 +1199,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1571,6 +1221,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1627,6 +1279,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1646,16 +1299,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1670,6 +1325,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1938,6 +1595,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1975,14 +1633,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - 
-store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1994,6 +1654,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-shuffle-sharding-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-generated.yaml index 0748befd73..09644948d1 100644 --- a/operations/mimir-tests/test-shuffle-sharding-generated.yaml +++ b/operations/mimir-tests/test-shuffle-sharding-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: 
consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - 
namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - 
name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -777,15 +405,16 @@ spec: - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - -distributor.ingestion-tenant-shard-size=3 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -802,6 +431,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -841,6 +472,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -862,12 +494,14 @@ spec: - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - -distributor.ingestion-tenant-shard-size=3 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -876,10 +510,9 @@ spec: - 
-server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.tenant-shard-size=3 - -store.max-query-length=768h - -target=querier @@ -894,6 +527,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1070,6 +705,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1091,28 +727,28 @@ spec: - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - -distributor.ingestion-tenant-shard-size=3 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -ruler.tenant-shard-size=2 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.tenant-shard-size=3 - -store.max-query-length=768h - -target=ruler @@ -1162,17 +798,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1191,6 +830,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1246,6 +887,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1262,13 +904,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - 
-compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1282,6 +926,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1338,6 +984,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1358,16 +1005,18 @@ spec: - -distributor.ingestion-tenant-shard-size=3 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1382,6 +1031,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1650,6 +1301,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1687,14 +1339,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -store-gateway.tenant-shard-size=3 @@ -1707,6 +1361,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-storage-azure-generated.yaml b/operations/mimir-tests/test-storage-azure-generated.yaml index 28a2d7ac06..a543dd3e59 100644 --- a/operations/mimir-tests/test-storage-azure-generated.yaml +++ b/operations/mimir-tests/test-storage-azure-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: 
ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: 
consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc 
port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - 
name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -776,15 +404,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -801,6 +430,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -840,6 +471,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -862,12 +494,14 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -876,10 +510,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -893,6 +526,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1067,6 +702,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ 
-1089,11 +725,13 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.azure.account-key=rules-account-key - -ruler-storage.azure.account-name=rules-account-name - -ruler-storage.azure.container-name=rules-bucket @@ -1101,17 +739,15 @@ spec: - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1160,6 +796,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: @@ -1168,11 +805,13 @@ spec: - -alertmanager-storage.azure.account-name=alerts-account-name - -alertmanager-storage.azure.container-name=alerts-bucket - -alertmanager-storage.backend=azure - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1191,6 +830,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1246,6 +887,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1264,13 +906,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - 
-server.grpc.keepalive.ping-without-stream-allowed=true @@ -1284,6 +928,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1340,6 +986,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1361,16 +1008,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1385,6 +1034,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1653,6 +1304,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1692,14 +1344,16 @@ spec: - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1711,6 +1365,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-storage-gcs-generated.yaml b/operations/mimir-tests/test-storage-gcs-generated.yaml index e64d662b4d..6ea4883c89 100644 --- a/operations/mimir-tests/test-storage-gcs-generated.yaml +++ b/operations/mimir-tests/test-storage-gcs-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: 
consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn 
- name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: 
consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: 
consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -776,15 +404,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -801,6 +430,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -840,6 +471,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -860,12 +492,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -874,10 +508,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -891,6 +524,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1065,6 +700,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1085,27 +721,27 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - 
-ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1154,17 +790,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1183,6 +822,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1238,6 +879,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1254,13 +896,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1274,6 +918,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1330,6 +976,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1349,16 +996,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - 
-ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1373,6 +1022,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1641,6 +1292,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1678,14 +1330,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1697,6 +1351,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir-tests/test-storage-s3-generated.yaml b/operations/mimir-tests/test-storage-s3-generated.yaml index ed11f29ca1..e20001b6a3 100644 --- a/operations/mimir-tests/test-storage-s3-generated.yaml +++ b/operations/mimir-tests/test-storage-s3-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: 
consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: 
consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 
7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace - - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -776,15 +404,16 @@ spec: - -distributor.health-check-ingesters=true - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - 
-ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -801,6 +430,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -840,6 +471,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -861,12 +493,14 @@ spec: - -blocks-storage.s3.bucket-name=blocks-bucket - -blocks-storage.s3.endpoint=s3.dualstack.us-east-1.amazonaws.com - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -875,10 +509,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=querier env: @@ -892,6 +525,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1066,6 +701,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1087,11 +723,13 @@ spec: - -blocks-storage.s3.bucket-name=blocks-bucket - -blocks-storage.s3.endpoint=s3.dualstack.us-east-1.amazonaws.com - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=s3 - -ruler-storage.s3.bucket-name=rules-bucket - -ruler-storage.s3.endpoint=s3.dualstack.eu-west-1.amazonaws.com @@ -1099,17 +737,15 @@ spec: - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - 
-store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store.max-query-length=768h - -target=ruler image: grafana/mimir:2.1.0 @@ -1158,6 +794,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: @@ -1165,11 +802,13 @@ spec: - -alertmanager-storage.backend=s3 - -alertmanager-storage.s3.bucket-name=alerts-bucket - -alertmanager-storage.s3.region=eu-west-1 - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1188,6 +827,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1243,6 +884,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1260,13 +902,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist - -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1280,6 +924,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1336,6 +982,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1356,16 +1003,18 @@ spec: - -distributor.health-check-ingesters=true - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1380,6 +1029,8 @@ spec: name: http-metrics - containerPort: 9095 
name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1648,6 +1299,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1686,14 +1338,16 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.s3.bucket-name=blocks-bucket - -blocks-storage.s3.endpoint=s3.dualstack.us-east-1.amazonaws.com + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -target=store-gateway @@ -1705,6 +1359,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir/memberlist.libsonnet b/operations/mimir/memberlist.libsonnet index 361619d2d3..514a86cca2 100644 --- a/operations/mimir/memberlist.libsonnet +++ b/operations/mimir/memberlist.libsonnet @@ -18,7 +18,7 @@ _config+:: { // Enables use of memberlist for all rings, instead of consul. If multikv_migration_enabled is true, consul hostname is still configured, // but "primary" KV depends on value of multikv_primary. - memberlist_ring_enabled: false, + memberlist_ring_enabled: true, // Migrating from consul to memberlist is a multi-step process: // 1) Enable multikv_migration_enabled, with primary=consul, secondary=memberlist, and multikv_mirror_enabled=false, restart components. From e92d1a0c953572a6d07d8fef2718b659741df17b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 21 Jun 2022 16:12:01 +0200 Subject: [PATCH 22/63] Update documentation for memberlist migration. Added test files for each migration step. (#2160) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- .../mimir-tests/test-consul-generated.yaml | 1770 ++++++++++++ .../test-consul-multi-zone-generated.yaml | 2473 +++++++++++++++++ .../test-consul-ruler-disabled-generated.yaml | 1660 +++++++++++ ...ist-migration-step-0-before-generated.yaml | 1770 ++++++++++++ ...memberlist-migration-step-0-before.jsonnet | 24 + ...emberlist-migration-step-1-generated.yaml} | 0 ... test-memberlist-migration-step-1.jsonnet} | 5 +- ...memberlist-migration-step-2-generated.yaml | 1868 +++++++++++++ ... 
test-memberlist-migration-step-2.jsonnet} | 5 +- ...memberlist-migration-step-3-generated.yaml | 1868 +++++++++++++ .../test-memberlist-migration-step-3.jsonnet | 27 + ...emberlist-migration-step-4-generated.yaml} | 0 .../test-memberlist-migration-step-4.jsonnet | 27 + ...emberlist-migration-step-5-generated.yaml} | 0 .../test-memberlist-migration-step-5.jsonnet | 29 + ...list-migration-step-6-final-generated.yaml | 1426 ++++++++++ ...memberlist-migration-step-6-final.jsonnet} | 7 +- ...sharding-read-path-disabled-generated.yaml | 512 +--- operations/mimir/memberlist.libsonnet | 29 +- 19 files changed, 13054 insertions(+), 446 deletions(-) create mode 100644 operations/mimir-tests/test-consul-generated.yaml create mode 100644 operations/mimir-tests/test-consul-multi-zone-generated.yaml create mode 100644 operations/mimir-tests/test-consul-ruler-disabled-generated.yaml create mode 100644 operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml create mode 100644 operations/mimir-tests/test-memberlist-migration-step-0-before.jsonnet rename operations/mimir-tests/{test-gossip-multikv-generated.yaml => test-memberlist-migration-step-1-generated.yaml} (100%) rename operations/mimir-tests/{test-gossip-multikv-switch-primary-secondary.jsonnet => test-memberlist-migration-step-1.jsonnet} (90%) create mode 100644 operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml rename operations/mimir-tests/{test-gossip-multikv.jsonnet => test-memberlist-migration-step-2.jsonnet} (89%) create mode 100644 operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml create mode 100644 operations/mimir-tests/test-memberlist-migration-step-3.jsonnet rename operations/mimir-tests/{test-gossip-multikv-switch-primary-secondary-generated.yaml => test-memberlist-migration-step-4-generated.yaml} (100%) create mode 100644 operations/mimir-tests/test-memberlist-migration-step-4.jsonnet rename operations/mimir-tests/{test-gossip-multikv-teardown-generated.yaml => test-memberlist-migration-step-5-generated.yaml} (100%) create mode 100644 operations/mimir-tests/test-memberlist-migration-step-5.jsonnet create mode 100644 operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml rename operations/mimir-tests/{test-gossip-multikv-teardown.jsonnet => test-memberlist-migration-step-6-final.jsonnet} (81%) diff --git a/operations/mimir-tests/test-consul-generated.yaml b/operations/mimir-tests/test-consul-generated.yaml new file mode 100644 index 0000000000..e64d662b4d --- /dev/null +++ b/operations/mimir-tests/test-consul-generated.yaml @@ -0,0 +1,1770 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, 
"raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: 
consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + 
namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: 
query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + 
volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - 
-ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + 
metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.store=consul + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + 
terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi 
+ requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + 
limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: 
Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: 
overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-consul-multi-zone-generated.yaml b/operations/mimir-tests/test-consul-multi-zone-generated.yaml new file mode 100644 index 0000000000..378afdf269 --- /dev/null +++ b/operations/mimir-tests/test-consul-multi-zone-generated.yaml @@ -0,0 +1,2473 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-rollout-pdb + name: ingester-rollout-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + rollout-group: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-rollout-pdb + name: store-gateway-rollout-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + rollout-group: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: rollout-operator + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - 
match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request 
+ labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: rollout-operator-role + namespace: default +rules: +- apiGroups: + - "" + resources: + - pods + verbs: + - list + - get + - watch + - delete +- apiGroups: + - apps + resources: + - statefulsets + verbs: + - list + - get + - watch +- apiGroups: + - apps + resources: + - statefulsets/status + verbs: + - update +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: rollout-operator-rolebinding + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: rollout-operator-role +subjects: +- kind: ServiceAccount + name: rollout-operator + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 
8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester-zone-a + name: ingester-zone-a + namespace: default +spec: + clusterIP: None + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester-zone-a + rollout-group: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester-zone-b + name: ingester-zone-b + namespace: default +spec: + clusterIP: None + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester-zone-b + rollout-group: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester-zone-c + name: ingester-zone-c + namespace: default +spec: + clusterIP: None + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester-zone-c + rollout-group: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + 
targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway-multi-zone + name: store-gateway-multi-zone + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 80 + protocol: TCP + targetPort: 80 + selector: + rollout-group: store-gateway +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway-zone-a + name: store-gateway-zone-a + namespace: default +spec: + clusterIP: None + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway-zone-a + rollout-group: store-gateway +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway-zone-b + name: store-gateway-zone-b + namespace: default +spec: + clusterIP: None + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway-zone-b + rollout-group: store-gateway +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway-zone-c + name: store-gateway-zone-c + namespace: default +spec: + clusterIP: None + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway-zone-c + rollout-group: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + 
name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.zone-awareness-enabled=true + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + 
memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.zone-awareness-enabled=true + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix=multi-zone/ + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.zone-awareness-enabled=true + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - 
-query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: rollout-operator + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: rollout-operator + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: rollout-operator + spec: + containers: + - args: + - -kubernetes.namespace=default + image: grafana/rollout-operator:v0.1.1 + imagePullPolicy: IfNotPresent + name: rollout-operator + ports: + - containerPort: 8001 + name: http-metrics + readinessProbe: + httpGet: + path: /ready + port: 8001 + initialDelaySeconds: 5 + timeoutSeconds: 1 + resources: + limits: + cpu: "1" + memory: 200Mi + requests: + cpu: 100m + memory: 100Mi + serviceAccountName: rollout-operator +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - 
labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.zone-awareness-enabled=true + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.store=consul + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix=multi-zone/ + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.zone-awareness-enabled=true + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + 
readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + rollout-max-unavailable: "10" + labels: + rollout-group: ingester + name: ingester-zone-a + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: ingester-zone-a + rollout-group: ingester + serviceName: ingester-zone-a + template: + metadata: + labels: + name: ingester-zone-a + rollout-group: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - ingester + - key: name + operator: NotIn + values: + - ingester-zone-a + topologyKey: kubernetes.io/hostname + 
containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.instance-availability-zone=zone-a + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -ingester.ring.zone-awareness-enabled=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + rollout-max-unavailable: "10" + labels: + rollout-group: ingester + name: ingester-zone-b + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: ingester-zone-b + rollout-group: ingester + serviceName: ingester-zone-b + template: + metadata: + labels: + name: ingester-zone-b + rollout-group: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - ingester + - key: name + operator: NotIn + values: + - ingester-zone-b + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.instance-availability-zone=zone-b + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - 
-ingester.ring.unregister-on-shutdown=true + - -ingester.ring.zone-awareness-enabled=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + rollout-max-unavailable: "10" + labels: + rollout-group: ingester + name: ingester-zone-c + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: ingester-zone-c + rollout-group: ingester + serviceName: ingester-zone-c + template: + metadata: + labels: + name: ingester-zone-c + rollout-group: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - ingester + - key: name + operator: NotIn + values: + - ingester-zone-c + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.instance-availability-zone=zone-c + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -ingester.ring.zone-awareness-enabled=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + 
securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: 
memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + rollout-max-unavailable: "10" + labels: + rollout-group: store-gateway + name: store-gateway-zone-a + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: store-gateway-zone-a + rollout-group: store-gateway + serviceName: store-gateway-zone-a + template: + metadata: + labels: + name: store-gateway-zone-a + rollout-group: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - store-gateway + - key: name + operator: NotIn + values: + - store-gateway-zone-a + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - 
-blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.instance-availability-zone=zone-a + - -store-gateway.sharding-ring.prefix=multi-zone/ + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.unregister-on-shutdown=false + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -store-gateway.sharding-ring.zone-awareness-enabled=true + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + rollout-max-unavailable: "10" + labels: + rollout-group: store-gateway + name: store-gateway-zone-b + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: store-gateway-zone-b + rollout-group: store-gateway + serviceName: store-gateway-zone-b + template: + metadata: + labels: + name: store-gateway-zone-b + rollout-group: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - store-gateway + - key: name + operator: NotIn + values: + - store-gateway-zone-b + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - 
-blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.instance-availability-zone=zone-b + - -store-gateway.sharding-ring.prefix=multi-zone/ + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.unregister-on-shutdown=false + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -store-gateway.sharding-ring.zone-awareness-enabled=true + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + annotations: + rollout-max-unavailable: "10" + labels: + rollout-group: store-gateway + name: store-gateway-zone-c + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: store-gateway-zone-c + rollout-group: store-gateway + serviceName: store-gateway-zone-c + template: + metadata: + labels: + name: store-gateway-zone-c + rollout-group: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: rollout-group + operator: In + values: + - store-gateway + - key: name + operator: NotIn + values: + - store-gateway-zone-c + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - 
-blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.instance-availability-zone=zone-c + - -store-gateway.sharding-ring.prefix=multi-zone/ + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.unregister-on-shutdown=false + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -store-gateway.sharding-ring.zone-awareness-enabled=true + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: OnDelete + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + 
resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml b/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml new file mode 100644 index 0000000000..f81b267747 --- /dev/null +++ b/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml @@ -0,0 +1,1660 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: 
consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: 
consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: 
Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - 
-ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - 
-server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - 
labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: 
+ fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - 
-blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + 
requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + 
- -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml 
b/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml new file mode 100644 index 0000000000..e64d662b4d --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml @@ -0,0 +1,1770 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + 
- match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + 
labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: 
default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: 
/etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.prefix= + - -distributor.ring.store=consul + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=1073741824 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- 
+apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -mem-ballast-size-bytes=268435456 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - 
-query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - 
-ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.store=consul + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=consul + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - 
-compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.prefix= + - -compactor.ring.store=consul + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=consul + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + 
resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + 
type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - 
-blocks-storage.gcs.bucket-name=blocks-bucket + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-memberlist-migration-step-0-before.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-0-before.jsonnet new file mode 100644 index 0000000000..b1967730c8 --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-0-before.jsonnet @@ -0,0 +1,24 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + // Step 0: State before the migration: memberlist is disabled. 
+ memberlist_ring_enabled: false, + }, +} diff --git a/operations/mimir-tests/test-gossip-multikv-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml similarity index 100% rename from operations/mimir-tests/test-gossip-multikv-generated.yaml rename to operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml diff --git a/operations/mimir-tests/test-gossip-multikv-switch-primary-secondary.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-1.jsonnet similarity index 90% rename from operations/mimir-tests/test-gossip-multikv-switch-primary-secondary.jsonnet rename to operations/mimir-tests/test-memberlist-migration-step-1.jsonnet index 29d293433f..70d61a6c7e 100644 --- a/operations/mimir-tests/test-gossip-multikv-switch-primary-secondary.jsonnet +++ b/operations/mimir-tests/test-memberlist-migration-step-1.jsonnet @@ -5,8 +5,6 @@ mimir { namespace: 'default', external_url: 'http://test', - memberlist_ring_enabled: true, - blocks_storage_backend: 'gcs', blocks_storage_bucket_name: 'blocks-bucket', bucket_index_enabled: true, @@ -20,7 +18,8 @@ mimir { alertmanager_client_type: 'gcs', alertmanager_gcs_bucket_name: 'alerts-bucket', + // Step 1: start migration from consul to memberlist. + memberlist_ring_enabled: true, multikv_migration_enabled: true, - multikv_switch_primary_secondary: true, }, } diff --git a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml new file mode 100644 index 0000000000..f2ae658fb8 --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml @@ -0,0 +1,1868 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + 
labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + 
- match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + - match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + multi_kv_config: + mirror_enabled: true + primary: consul + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + 
name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service 
+metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: 
distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.multi.primary=consul + - -distributor.ring.multi.secondary=memberlist + - -distributor.ring.prefix= + - -distributor.ring.store=multi + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - 
-blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.multi.primary=consul + - -store-gateway.sharding-ring.multi.secondary=memberlist + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=multi + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 
1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.multi.primary=consul + - -ruler.ring.multi.secondary=memberlist + - -ruler.ring.store=multi + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + 
- -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.multi.primary=consul + - -store-gateway.sharding-ring.multi.secondary=memberlist + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=multi + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + gossip_ring_member: "true" + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.multi.primary=consul + - -alertmanager.sharding-ring.multi.secondary=memberlist + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=multi + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + gossip_ring_member: "true" + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - 
-blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.multi.primary=consul + - -compactor.ring.multi.secondary=memberlist + - -compactor.ring.prefix= + - -compactor.ring.store=multi + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + gossip_ring_member: "true" + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + 
requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + gossip_ring_member: "true" + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - 
-blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.multi.primary=consul + - -store-gateway.sharding-ring.multi.secondary=memberlist + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=multi + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-gossip-multikv.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-2.jsonnet similarity index 89% rename from operations/mimir-tests/test-gossip-multikv.jsonnet rename to operations/mimir-tests/test-memberlist-migration-step-2.jsonnet index d33ea6c9bb..98bf373bbc 100644 --- a/operations/mimir-tests/test-gossip-multikv.jsonnet +++ 
b/operations/mimir-tests/test-memberlist-migration-step-2.jsonnet @@ -5,8 +5,6 @@ mimir { namespace: 'default', external_url: 'http://test', - memberlist_ring_enabled: true, - blocks_storage_backend: 'gcs', blocks_storage_bucket_name: 'blocks-bucket', bucket_index_enabled: true, @@ -20,6 +18,9 @@ mimir { alertmanager_client_type: 'gcs', alertmanager_gcs_bucket_name: 'alerts-bucket', + // Step 2: enable mirroring. + memberlist_ring_enabled: true, multikv_migration_enabled: true, + multikv_mirror_enabled: true, }, } diff --git a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml new file mode 100644 index 0000000000..da509001a8 --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml @@ -0,0 +1,1868 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +kind: ServiceAccount +metadata: + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +data: + consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, + "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' + mapping: | + mappings: + - match: consul.*.runtime.* + name: consul_runtime + labels: + type: $2 + - match: consul.runtime.total_gc_pause_ns + name: consul_runtime_total_gc_pause_ns + labels: + type: $2 + - match: consul.consul.health.service.query-tag.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3 + - match: consul.consul.health.service.query-tag.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4 + - match: consul.consul.health.service.query-tag.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 + - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* + name: consul_health_service_query_tag + labels: + query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 + - match: 
consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.dns.domain_query.*.*.*.*.* + name: consul_dns_domain_query + labels: + query: $1.$2.$3.$4.$5 + - match: consul.consul.health.service.not-found.* + name: consul_health_service_not_found + labels: + query: $1 + - match: consul.consul.health.service.query.* + name: consul_health_service_query + labels: + query: $1 + - match: consul.*.memberlist.health.score + name: consul_memberlist_health_score + labels: {} + - match: consul.serf.queue.* + name: consul_serf_events + labels: + type: $1 + - match: consul.serf.snapshot.appendLine + name: consul_serf_snapshot_appendLine + labels: + type: $1 + - match: consul.serf.coordinate.adjustment-ms + name: consul_serf_coordinate_adjustment_ms + labels: {} + - match: consul.consul.rpc.query + name: consul_rpc_query + labels: {} + - match: consul.*.consul.session_ttl.active + name: consul_session_ttl_active + labels: {} + - match: consul.raft.rpc.* + name: consul_raft_rpc + labels: + type: $1 + - match: consul.raft.rpc.appendEntries.storeLogs + name: consul_raft_rpc_appendEntries_storeLogs + labels: + type: $1 + - match: consul.consul.fsm.persist + name: consul_fsm_persist + labels: {} + - match: consul.raft.fsm.apply + name: consul_raft_fsm_apply + labels: {} + - match: consul.raft.leader.lastContact + name: consul_raft_leader_lastcontact + labels: {} + - match: consul.raft.leader.dispatchLog + name: consul_raft_leader_dispatchLog + labels: {} + - match: consul.raft.commitTime + name: consul_raft_commitTime + labels: {} + - match: consul.raft.replication.appendEntries.logs.*.*.*.* + name: consul_raft_replication_appendEntries_logs + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.appendEntries.rpc.*.*.*.* + name: consul_raft_replication_appendEntries_rpc + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.raft.replication.heartbeat.*.*.*.* + name: consul_raft_replication_heartbeat + labels: + query: ${1}.${2}.${3}.${4} + - match: consul.consul.rpc.request + name: consul_rpc_requests + labels: {} + - match: consul.consul.rpc.accept_conn + name: consul_rpc_accept_conn + labels: {} + - match: consul.memberlist.udp.* + name: consul_memberlist_udp + labels: + type: $1 + - match: consul.memberlist.tcp.* + name: consul_memberlist_tcp + labels: + type: $1 + - match: consul.memberlist.gossip + name: consul_memberlist_gossip + labels: {} + - match: consul.memberlist.probeNode + name: consul_memberlist_probenode + labels: {} + - match: consul.memberlist.pushPullNode + name: consul_memberlist_pushpullnode + labels: {} + - match: consul.http.* + name: consul_http_request + labels: + method: $1 + path: / + - match: consul.http.*.* + name: consul_http_request + labels: + method: $1 + path: /$2 + - match: consul.http.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3 + - match: consul.http.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4 + - match: consul.http.*.*.*.*.* + name: consul_http_request + labels: + method: $1 + path: /$2/$3/$4/$5 + - match: consul.consul.leader.barrier + name: consul_leader_barrier + labels: {} + - match: consul.consul.leader.reconcileMember + name: consul_leader_reconcileMember + labels: {} + - match: consul.consul.leader.reconcile + name: consul_leader_reconcile + labels: {} + - match: consul.consul.fsm.coordinate.batch-update + name: consul_fsm_coordinate_batch_update + labels: {} + - match: consul.consul.fsm.autopilot + name: consul_fsm_autopilot + labels: {} + 
- match: consul.consul.fsm.kvs.cas + name: consul_fsm_kvs_cas + labels: {} + - match: consul.consul.fsm.register + name: consul_fsm_register + labels: {} + - match: consul.consul.fsm.deregister + name: consul_fsm_deregister + labels: {} + - match: consul.consul.fsm.tombstone.reap + name: consul_fsm_tombstone_reap + labels: {} + - match: consul.consul.catalog.register + name: consul_catalog_register + labels: {} + - match: consul.consul.catalog.deregister + name: consul_catalog_deregister + labels: {} + - match: consul.consul.leader.reapTombstones + name: consul_leader_reapTombstones + labels: {} +kind: ConfigMap +metadata: + name: consul + namespace: default +--- +apiVersion: v1 +data: + overrides.yaml: | + multi_kv_config: + mirror_enabled: true + primary: memberlist + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: consul-sidekick + namespace: default +rules: +- apiGroups: + - "" + - extensions + - apps + resources: + - pods + - replicasets + verbs: + - get + - list + - watch +--- +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: consul-sidekick + namespace: default +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: consul-sidekick +subjects: +- kind: ServiceAccount + name: consul-sidekick + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: consul + name: consul + namespace: default +spec: + ports: + - name: consul-server + port: 8300 + targetPort: 8300 + - name: consul-serf + port: 8301 + targetPort: 8301 + - name: consul-client + port: 8400 + targetPort: 8400 + - name: consul-api + port: 8500 + targetPort: 8500 + - name: statsd-exporter-http-metrics + port: 8000 + targetPort: 8000 + - name: consul-exporter-http-metrics + port: 9107 + targetPort: 9107 + selector: + name: consul +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + 
targetPort: 7946 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 
9095 + targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: consul + namespace: default +spec: + minReadySeconds: 10 + replicas: 1 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: consul + template: + metadata: + annotations: + consul-hash: e56ef6821a3557604caccaf6d5820239 + labels: + name: consul + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: consul + topologyKey: kubernetes.io/hostname + - labelSelector: + matchLabels: + name: ingester + namespaces: + - default + topologyKey: kubernetes.io/hostname + containers: + - args: + - agent + - -ui + - -server + - -client=0.0.0.0 + - -config-file=/etc/config/consul-config.json + - -bootstrap-expect=1 + - -ui-content-path=/default/consul/ + env: + - name: CHECKPOINT_DISABLE + value: "1" + image: consul:1.5.3 + imagePullPolicy: IfNotPresent + name: consul + ports: + - containerPort: 8300 + name: server + - containerPort: 8301 + name: serf + - containerPort: 8400 + name: client + - containerPort: 8500 + name: api + resources: + requests: + cpu: "4" + memory: 4Gi + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --namespace=$(POD_NAMESPACE) + - --pod-name=$(POD_NAME) + env: + - name: POD_NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace + - name: POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + image: weaveworks/consul-sidekick:master-f18ad13 + imagePullPolicy: IfNotPresent + name: sidekick + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --web.listen-address=:8000 + - --statsd.mapping-config=/etc/config/mapping + image: prom/statsd-exporter:v0.12.2 + imagePullPolicy: IfNotPresent + name: statsd-exporter + ports: + - containerPort: 8000 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + - args: + - --consul.server=localhost:8500 + - --web.listen-address=:9107 + - --consul.timeout=1s + - --no-consul.health-summary + - --consul.allow_stale + image: prom/consul-exporter:v0.5.0 + imagePullPolicy: IfNotPresent + name: consul-exporter + ports: + - containerPort: 9107 + name: http-metrics + volumeMounts: + - mountPath: /etc/config + name: consul + - mountPath: /consul/data/ + name: data + serviceAccount: consul-sidekick + volumes: + - configMap: + name: consul + name: consul + - emptyDir: + medium: Memory + name: data +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: distributor + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - 
-distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.multi.primary=consul + - -distributor.ring.multi.secondary=memberlist + - -distributor.ring.prefix= + - -distributor.ring.store=multi + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - 
-server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.multi.primary=consul + - -store-gateway.sharding-ring.multi.secondary=memberlist + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=multi + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - 
-server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ruler.ring.multi.primary=consul + - -ruler.ring.multi.secondary=memberlist + - -ruler.ring.store=multi + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.multi.primary=consul + - -store-gateway.sharding-ring.multi.secondary=memberlist + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=multi + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: 
"16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + gossip_ring_member: "true" + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -alertmanager.sharding-ring.multi.primary=consul + - -alertmanager.sharding-ring.multi.secondary=memberlist + - -alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=multi + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + gossip_ring_member: "true" + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -compactor.ring.multi.primary=consul + - -compactor.ring.multi.secondary=memberlist + - -compactor.ring.prefix= + - -compactor.ring.store=multi + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - 
-compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + gossip_ring_member: "true" + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.multi.primary=consul + - -ingester.ring.multi.secondary=memberlist + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=multi + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: 
/etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + 
selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + metadata: + labels: + gossip_ring_member: "true" + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -store-gateway.sharding-ring.multi.primary=consul + - -store-gateway.sharding-ring.multi.secondary=memberlist + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=multi + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-memberlist-migration-step-3.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-3.jsonnet new file mode 100644 index 0000000000..ca206f58fb --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-3.jsonnet @@ -0,0 +1,27 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + // Step 3: switch primary (consul) and secondary (memberlist), make memberlist primary KV. 
+ memberlist_ring_enabled: true, + multikv_migration_enabled: true, + multikv_mirror_enabled: true, + multikv_switch_primary_secondary: true, + }, +} diff --git a/operations/mimir-tests/test-gossip-multikv-switch-primary-secondary-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml similarity index 100% rename from operations/mimir-tests/test-gossip-multikv-switch-primary-secondary-generated.yaml rename to operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-4.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-4.jsonnet new file mode 100644 index 0000000000..33939e799f --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-4.jsonnet @@ -0,0 +1,27 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + // Step 4: disable mirroring from primary (now memberlist) to secondary (now Consul) KV. + memberlist_ring_enabled: true, + multikv_migration_enabled: true, + multikv_mirror_enabled: false, + multikv_switch_primary_secondary: true, + }, +} diff --git a/operations/mimir-tests/test-gossip-multikv-teardown-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml similarity index 100% rename from operations/mimir-tests/test-gossip-multikv-teardown-generated.yaml rename to operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-5.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-5.jsonnet new file mode 100644 index 0000000000..32894303ee --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-5.jsonnet @@ -0,0 +1,29 @@ +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + namespace: 'default', + external_url: 'http://test', + + blocks_storage_backend: 'gcs', + blocks_storage_bucket_name: 'blocks-bucket', + bucket_index_enabled: true, + query_scheduler_enabled: true, + + ruler_enabled: true, + ruler_client_type: 'gcs', + ruler_storage_bucket_name: 'rules-bucket', + + alertmanager_enabled: true, + alertmanager_client_type: 'gcs', + alertmanager_gcs_bucket_name: 'alerts-bucket', + + // Step 5: disable migration (ie. use of multi KV), but keep runtime config around for components that haven't restarted yet. + // Note: this also removes Consul. That's fine, because it's not used anymore (mirroring to it was disabled in step 4). 
+ memberlist_ring_enabled: true, + multikv_migration_enabled: false, + multikv_mirror_enabled: false, + multikv_switch_primary_secondary: true, + multikv_migration_teardown: true, + }, +} diff --git a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml new file mode 100644 index 0000000000..6ea4883c89 --- /dev/null +++ b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml @@ -0,0 +1,1426 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: default +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: alertmanager-pdb + name: alertmanager-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: alertmanager +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: ingester-pdb + name: ingester-pdb + namespace: default +spec: + maxUnavailable: 1 + selector: + matchLabels: + name: ingester +--- +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + labels: + name: store-gateway-pdb + name: store-gateway-pdb + namespace: default +spec: + maxUnavailable: 2 + selector: + matchLabels: + name: store-gateway +--- +apiVersion: v1 +data: + overrides.yaml: | + overrides: {} +kind: ConfigMap +metadata: + name: overrides + namespace: default +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + clusterIP: None + ports: + - name: alertmanager-http-metrics + port: 8080 + targetPort: 8080 + - name: alertmanager-grpc + port: 9095 + targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: alertmanager +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + clusterIP: None + ports: + - name: compactor-http-metrics + port: 8080 + targetPort: 8080 + - name: compactor-grpc + port: 9095 + targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: compactor +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: distributor + name: distributor + namespace: default +spec: + clusterIP: None + ports: + - name: distributor-http-metrics + port: 8080 + targetPort: 8080 + - name: distributor-grpc + port: 9095 + targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: distributor +--- +apiVersion: v1 +kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + ports: + - name: ingester-http-metrics + port: 8080 + targetPort: 8080 + - name: ingester-grpc + port: 9095 + targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: ingester +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached + name: memcached + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-frontend + name: memcached-frontend + namespace: default +spec: + clusterIP: None + ports: + - 
name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-index-queries + name: memcached-index-queries + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-index-queries +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: memcached-metadata + name: memcached-metadata + namespace: default +spec: + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + - name: exporter-http-metrics + port: 9150 + targetPort: 9150 + selector: + name: memcached-metadata +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: querier + name: querier + namespace: default +spec: + ports: + - name: querier-http-metrics + port: 8080 + targetPort: 8080 + - name: querier-grpc + port: 9095 + targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: querier +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend + namespace: default +spec: + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-frontend + name: query-frontend-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-frontend-http-metrics + port: 8080 + targetPort: 8080 + - name: query-frontend-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-frontend +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler + namespace: default +spec: + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: query-scheduler + name: query-scheduler-discovery + namespace: default +spec: + clusterIP: None + ports: + - name: query-scheduler-http-metrics + port: 8080 + targetPort: 8080 + - name: query-scheduler-grpc + port: 9095 + targetPort: 9095 + publishNotReadyAddresses: true + selector: + name: query-scheduler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: ruler + name: ruler + namespace: default +spec: + ports: + - name: ruler-http-metrics + port: 8080 + targetPort: 8080 + - name: ruler-grpc + port: 9095 + targetPort: 9095 + selector: + name: ruler +--- +apiVersion: v1 +kind: Service +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + ports: + - name: store-gateway-http-metrics + port: 8080 + targetPort: 8080 + - name: store-gateway-grpc + port: 9095 + targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 + selector: + name: store-gateway +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: distributor + namespace: default +spec: + minReadySeconds: 10 + replicas: 3 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: distributor + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: distributor + spec: + affinity: + 
podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: distributor + topologyKey: kubernetes.io/hostname + containers: + - args: + - -distributor.ha-tracker.enable=true + - -distributor.ha-tracker.enable-for-all-users=true + - -distributor.ha-tracker.etcd.endpoints=etcd-client.default.svc.cluster.local.:2379 + - -distributor.ha-tracker.prefix=prom_ha/ + - -distributor.ha-tracker.store=etcd + - -distributor.health-check-ingesters=true + - -distributor.ingestion-burst-size=200000 + - -distributor.ingestion-rate-limit=10000 + - -distributor.ring.prefix= + - -distributor.ring.store=memberlist + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=memberlist + - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.max-connection-age=2m + - -server.grpc.keepalive.max-connection-age-grace=5m + - -server.grpc.keepalive.max-connection-idle=1m + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=distributor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: distributor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 4Gi + requests: + cpu: "2" + memory: 2Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: querier + namespace: default +spec: + minReadySeconds: 10 + replicas: 6 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: querier + strategy: + rollingUpdate: + maxSurge: 5 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: querier + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: querier + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=memberlist + - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -querier.frontend-client.grpc-max-send-msg-size=104857600 + - -querier.max-concurrent=8 + - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - 
-server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=memberlist + - -store.max-query-length=768h + - -target=querier + env: + - name: JAEGER_REPORTER_MAX_QUEUE_SIZE + value: "1024" + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: querier + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 24Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-frontend + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-frontend + strategy: + rollingUpdate: + maxSurge: 1 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-frontend.align-querier-with-step=false + - -query-frontend.cache-results=true + - -query-frontend.max-cache-freshness=10m + - -query-frontend.results-cache.backend=memcached + - -query-frontend.results-cache.memcached.addresses=dnssrvnoa+memcached-frontend.default.svc.cluster.local:11211 + - -query-frontend.results-cache.memcached.timeout=500ms + - -query-frontend.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -server.http-write-timeout=1m + - -store.max-query-length=12000h + - -target=query-frontend + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: query-frontend + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 1200Mi + requests: + cpu: "2" + memory: 600Mi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: query-scheduler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: query-scheduler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + name: query-scheduler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: query-scheduler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -query-scheduler.max-outstanding-requests-per-tenant=100 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=query-scheduler + image: grafana/mimir:2.1.0 + 
imagePullPolicy: IfNotPresent + name: query-scheduler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 2Gi + requests: + cpu: "2" + memory: 1Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: Deployment +metadata: + name: ruler + namespace: default +spec: + minReadySeconds: 10 + replicas: 2 + revisionHistoryLimit: 10 + selector: + matchLabels: + name: ruler + strategy: + rollingUpdate: + maxSurge: 0 + maxUnavailable: 1 + template: + metadata: + labels: + gossip_ring_member: "true" + name: ruler + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ruler + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -distributor.health-check-ingesters=true + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.prefix= + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -ruler-storage.backend=gcs + - -ruler-storage.gcs.bucket-name=rules-bucket + - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager + - -ruler.max-rule-groups-per-tenant=35 + - -ruler.max-rules-per-rule-group=20 + - -ruler.ring.store=memberlist + - -ruler.rule-path=/rules + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=memberlist + - -store.max-query-length=768h + - -target=ruler + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ruler + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + cpu: "16" + memory: 16Gi + requests: + cpu: "1" + memory: 6Gi + volumeMounts: + - mountPath: /etc/mimir + name: overrides + terminationGracePeriodSeconds: 600 + volumes: + - configMap: + name: overrides + name: overrides +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: alertmanager + name: alertmanager + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: alertmanager + serviceName: alertmanager + template: + metadata: + labels: + gossip_ring_member: "true" + name: alertmanager + spec: + containers: + - args: + - -alertmanager-storage.backend=gcs + - -alertmanager-storage.gcs.bucket-name=alerts-bucket + - 
-alertmanager.sharding-ring.replication-factor=3 + - -alertmanager.sharding-ring.store=memberlist + - -alertmanager.storage.path=/data + - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=alertmanager + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: alertmanager + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 15Gi + requests: + cpu: "2" + memory: 10Gi + volumeMounts: + - mountPath: /data + name: alertmanager-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: alertmanager-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: compactor + name: compactor + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + name: compactor + serviceName: compactor + template: + metadata: + labels: + gossip_ring_member: "true" + name: compactor + spec: + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -compactor.block-ranges=2h,12h,24h + - -compactor.blocks-retention-period=0 + - -compactor.cleanup-interval=15m + - -compactor.compaction-concurrency=1 + - -compactor.compaction-interval=30m + - -compactor.compactor-tenant-shard-size=1 + - -compactor.data-dir=/data + - -compactor.deletion-delay=2h + - -compactor.max-closing-blocks-concurrency=2 + - -compactor.max-opening-blocks-concurrency=4 + - -compactor.ring.prefix= + - -compactor.ring.store=memberlist + - -compactor.ring.wait-stability-min-duration=1m + - -compactor.split-and-merge-shards=0 + - -compactor.split-groups=1 + - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=compactor + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: compactor + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 6Gi + requests: + cpu: 1 + memory: 6Gi + volumeMounts: + - mountPath: /data + name: compactor-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 900 + volumes: + - configMap: + name: overrides + name: 
overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: compactor-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 250Gi + storageClassName: standard +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: ingester + name: ingester + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: ingester + serviceName: ingester + template: + metadata: + labels: + gossip_ring_member: "true" + name: ingester + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: ingester + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -blocks-storage.tsdb.block-ranges-period=2h + - -blocks-storage.tsdb.dir=/data/tsdb + - -blocks-storage.tsdb.ship-interval=1m + - -distributor.health-check-ingesters=true + - -ingester.max-global-series-per-metric=20000 + - -ingester.max-global-series-per-user=150000 + - -ingester.ring.heartbeat-period=15s + - -ingester.ring.heartbeat-timeout=10m + - -ingester.ring.num-tokens=512 + - -ingester.ring.prefix= + - -ingester.ring.readiness-check-ring-health=false + - -ingester.ring.replication-factor=3 + - -ingester.ring.store=memberlist + - -ingester.ring.tokens-file-path=/data/tokens + - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc-max-concurrent-streams=10000 + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -target=ingester + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: ingester + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 25Gi + requests: + cpu: "4" + memory: 15Gi + volumeMounts: + - mountPath: /data + name: ingester-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 1200 + volumes: + - configMap: + name: overrides + name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: ingester-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 100Gi + storageClassName: fast +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached + serviceName: memcached + template: + metadata: + labels: + name: memcached + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 6144 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 9Gi + requests: + cpu: 500m + memory: 6552Mi + - args: + - --memcached.address=localhost:11211 + - 
--web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-frontend + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-frontend + serviceName: memcached-frontend + template: + metadata: + labels: + name: memcached-frontend + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-frontend + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 1024 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-index-queries + namespace: default +spec: + replicas: 3 + selector: + matchLabels: + name: memcached-index-queries + serviceName: memcached-index-queries + template: + metadata: + labels: + name: memcached-index-queries + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-index-queries + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 1024 + - -I 5m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 1536Mi + requests: + cpu: 500m + memory: 1329Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: memcached-metadata + namespace: default +spec: + replicas: 1 + selector: + matchLabels: + name: memcached-metadata + serviceName: memcached-metadata + template: + metadata: + labels: + name: memcached-metadata + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: memcached-metadata + topologyKey: kubernetes.io/hostname + containers: + - args: + - -m 512 + - -I 1m + - -c 16384 + - -v + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + name: memcached + ports: + - containerPort: 11211 + name: client + resources: + limits: + memory: 768Mi + requests: + cpu: 500m + memory: 715Mi + - args: + - --memcached.address=localhost:11211 + - --web.listen-address=0.0.0.0:9150 + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + name: exporter + ports: + - containerPort: 9150 + name: http-metrics + updateStrategy: + type: RollingUpdate +--- +apiVersion: apps/v1 +kind: StatefulSet +metadata: + labels: + name: store-gateway + name: store-gateway + namespace: default +spec: + podManagementPolicy: Parallel + replicas: 3 + selector: + matchLabels: + name: store-gateway + serviceName: store-gateway + template: + 
metadata: + labels: + gossip_ring_member: "true" + name: store-gateway + spec: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + name: store-gateway + topologyKey: kubernetes.io/hostname + containers: + - args: + - -blocks-storage.backend=gcs + - -blocks-storage.bucket-store.chunks-cache.backend=memcached + - -blocks-storage.bucket-store.chunks-cache.memcached.addresses=dnssrvnoa+memcached.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.chunks-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.chunks-cache.memcached.timeout=450ms + - -blocks-storage.bucket-store.index-cache.backend=memcached + - -blocks-storage.bucket-store.index-cache.memcached.addresses=dnssrvnoa+memcached-index-queries.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.index-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.index-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.index-cache.memcached.max-item-size=5242880 + - -blocks-storage.bucket-store.index-header-lazy-loading-enabled=true + - -blocks-storage.bucket-store.index-header-lazy-loading-idle-timeout=60m + - -blocks-storage.bucket-store.max-chunk-pool-bytes=12884901888 + - -blocks-storage.bucket-store.metadata-cache.backend=memcached + - -blocks-storage.bucket-store.metadata-cache.memcached.addresses=dnssrvnoa+memcached-metadata.default.svc.cluster.local:11211 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-async-concurrency=50 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-get-multi-concurrency=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-idle-connections=100 + - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 + - -blocks-storage.bucket-store.sync-dir=/data/tsdb + - -blocks-storage.bucket-store.sync-interval=15m + - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 + - -runtime-config.file=/etc/mimir/overrides.yaml + - -server.grpc.keepalive.min-time-between-pings=10s + - -server.grpc.keepalive.ping-without-stream-allowed=true + - -server.http-listen-port=8080 + - -store-gateway.sharding-ring.prefix= + - -store-gateway.sharding-ring.replication-factor=3 + - -store-gateway.sharding-ring.store=memberlist + - -store-gateway.sharding-ring.tokens-file-path=/data/tokens + - -store-gateway.sharding-ring.wait-stability-min-duration=1m + - -target=store-gateway + image: grafana/mimir:2.1.0 + imagePullPolicy: IfNotPresent + name: store-gateway + ports: + - containerPort: 8080 + name: http-metrics + - containerPort: 9095 + name: grpc + - containerPort: 7946 + name: gossip-ring + readinessProbe: + httpGet: + path: /ready + port: 8080 + initialDelaySeconds: 15 + timeoutSeconds: 1 + resources: + limits: + memory: 18Gi + requests: + cpu: "1" + memory: 12Gi + volumeMounts: + - mountPath: /data + name: store-gateway-data + - mountPath: /etc/mimir + name: overrides + securityContext: + runAsUser: 0 + terminationGracePeriodSeconds: 120 + volumes: + - configMap: + name: overrides 
+ name: overrides + updateStrategy: + type: RollingUpdate + volumeClaimTemplates: + - apiVersion: v1 + kind: PersistentVolumeClaim + metadata: + name: store-gateway-data + spec: + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 50Gi + storageClassName: standard +--- +apiVersion: etcd.database.coreos.com/v1beta2 +kind: EtcdCluster +metadata: + annotations: + etcd.database.coreos.com/scope: clusterwide + name: etcd + namespace: default +spec: + pod: + affinity: + podAntiAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + etcd_cluster: etcd + topologyKey: kubernetes.io/hostname + annotations: + prometheus.io/port: "2379" + prometheus.io/scrape: "true" + etcdEnv: + - name: ETCD_AUTO_COMPACTION_RETENTION + value: 1h + labels: + name: etcd + resources: + limits: + memory: 512Mi + requests: + cpu: 500m + memory: 512Mi + size: 3 + version: 3.3.13 diff --git a/operations/mimir-tests/test-gossip-multikv-teardown.jsonnet b/operations/mimir-tests/test-memberlist-migration-step-6-final.jsonnet similarity index 81% rename from operations/mimir-tests/test-gossip-multikv-teardown.jsonnet rename to operations/mimir-tests/test-memberlist-migration-step-6-final.jsonnet index 26d28c044f..99a5f29282 100644 --- a/operations/mimir-tests/test-gossip-multikv-teardown.jsonnet +++ b/operations/mimir-tests/test-memberlist-migration-step-6-final.jsonnet @@ -5,8 +5,6 @@ mimir { namespace: 'default', external_url: 'http://test', - memberlist_ring_enabled: true, - blocks_storage_backend: 'gcs', blocks_storage_bucket_name: 'blocks-bucket', bucket_index_enabled: true, @@ -20,8 +18,7 @@ mimir { alertmanager_client_type: 'gcs', alertmanager_gcs_bucket_name: 'alerts-bucket', - multikv_migration_enabled: false, - multikv_migration_teardown: true, - multikv_switch_primary_secondary: true, + // Step 6: remove all migration options, but keep memberlist ring enabled. 
+ memberlist_ring_enabled: true, }, } diff --git a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml index f879e5005d..be61b1ab60 100644 --- a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml +++ b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml @@ -43,225 +43,6 @@ spec: name: store-gateway --- apiVersion: v1 -kind: ServiceAccount -metadata: - name: consul-sidekick - namespace: default ---- -apiVersion: v1 -data: - consul-config.json: '{"leave_on_terminate": true, "raft_snapshot_threshold": 128, - "raft_trailing_logs": 10000, "telemetry": {"dogstatsd_addr": "127.0.0.1:9125"}}' - mapping: | - mappings: - - match: consul.*.runtime.* - name: consul_runtime - labels: - type: $2 - - match: consul.runtime.total_gc_pause_ns - name: consul_runtime_total_gc_pause_ns - labels: - type: $2 - - match: consul.consul.health.service.query-tag.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3 - - match: consul.consul.health.service.query-tag.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4 - - match: consul.consul.health.service.query-tag.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11 - - match: consul.consul.health.service.query-tag.*.*.*.*.*.*.*.*.*.*.*.* - name: consul_health_service_query_tag - labels: - query: $1.$2.$3.$4.$5.$6.$7.$8.$9.$10.$11.$12 - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.dns.domain_query.*.*.*.*.* - name: consul_dns_domain_query - labels: - query: $1.$2.$3.$4.$5 - - match: consul.consul.health.service.not-found.* - name: consul_health_service_not_found - labels: - query: $1 - - match: consul.consul.health.service.query.* - name: consul_health_service_query - labels: - query: $1 - - match: consul.*.memberlist.health.score - name: consul_memberlist_health_score - labels: {} - - match: consul.serf.queue.* - name: consul_serf_events - labels: - type: $1 - - match: consul.serf.snapshot.appendLine - name: consul_serf_snapshot_appendLine - labels: - type: $1 - - match: consul.serf.coordinate.adjustment-ms - name: consul_serf_coordinate_adjustment_ms - labels: {} - - match: consul.consul.rpc.query - name: consul_rpc_query - labels: {} - - match: consul.*.consul.session_ttl.active - name: consul_session_ttl_active - labels: {} - - match: consul.raft.rpc.* - name: consul_raft_rpc - labels: - type: $1 - - match: consul.raft.rpc.appendEntries.storeLogs - name: 
consul_raft_rpc_appendEntries_storeLogs - labels: - type: $1 - - match: consul.consul.fsm.persist - name: consul_fsm_persist - labels: {} - - match: consul.raft.fsm.apply - name: consul_raft_fsm_apply - labels: {} - - match: consul.raft.leader.lastContact - name: consul_raft_leader_lastcontact - labels: {} - - match: consul.raft.leader.dispatchLog - name: consul_raft_leader_dispatchLog - labels: {} - - match: consul.raft.commitTime - name: consul_raft_commitTime - labels: {} - - match: consul.raft.replication.appendEntries.logs.*.*.*.* - name: consul_raft_replication_appendEntries_logs - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.appendEntries.rpc.*.*.*.* - name: consul_raft_replication_appendEntries_rpc - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.raft.replication.heartbeat.*.*.*.* - name: consul_raft_replication_heartbeat - labels: - query: ${1}.${2}.${3}.${4} - - match: consul.consul.rpc.request - name: consul_rpc_requests - labels: {} - - match: consul.consul.rpc.accept_conn - name: consul_rpc_accept_conn - labels: {} - - match: consul.memberlist.udp.* - name: consul_memberlist_udp - labels: - type: $1 - - match: consul.memberlist.tcp.* - name: consul_memberlist_tcp - labels: - type: $1 - - match: consul.memberlist.gossip - name: consul_memberlist_gossip - labels: {} - - match: consul.memberlist.probeNode - name: consul_memberlist_probenode - labels: {} - - match: consul.memberlist.pushPullNode - name: consul_memberlist_pushpullnode - labels: {} - - match: consul.http.* - name: consul_http_request - labels: - method: $1 - path: / - - match: consul.http.*.* - name: consul_http_request - labels: - method: $1 - path: /$2 - - match: consul.http.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3 - - match: consul.http.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4 - - match: consul.http.*.*.*.*.* - name: consul_http_request - labels: - method: $1 - path: /$2/$3/$4/$5 - - match: consul.consul.leader.barrier - name: consul_leader_barrier - labels: {} - - match: consul.consul.leader.reconcileMember - name: consul_leader_reconcileMember - labels: {} - - match: consul.consul.leader.reconcile - name: consul_leader_reconcile - labels: {} - - match: consul.consul.fsm.coordinate.batch-update - name: consul_fsm_coordinate_batch_update - labels: {} - - match: consul.consul.fsm.autopilot - name: consul_fsm_autopilot - labels: {} - - match: consul.consul.fsm.kvs.cas - name: consul_fsm_kvs_cas - labels: {} - - match: consul.consul.fsm.register - name: consul_fsm_register - labels: {} - - match: consul.consul.fsm.deregister - name: consul_fsm_deregister - labels: {} - - match: consul.consul.fsm.tombstone.reap - name: consul_fsm_tombstone_reap - labels: {} - - match: consul.consul.catalog.register - name: consul_catalog_register - labels: {} - - match: consul.consul.catalog.deregister - name: consul_catalog_deregister - labels: {} - - match: consul.consul.leader.reapTombstones - name: consul_leader_reapTombstones - labels: {} -kind: ConfigMap -metadata: - name: consul - namespace: default ---- -apiVersion: v1 data: overrides.yaml: | overrides: {} @@ -270,38 +51,6 @@ metadata: name: overrides namespace: default --- -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: consul-sidekick - namespace: default -rules: -- apiGroups: - - "" - - extensions - - apps - resources: - - pods - - replicasets - verbs: - - get - - list - - watch ---- -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding 
-metadata: - name: consul-sidekick - namespace: default -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: consul-sidekick -subjects: -- kind: ServiceAccount - name: consul-sidekick - namespace: default ---- apiVersion: v1 kind: Service metadata: @@ -318,6 +67,9 @@ spec: - name: alertmanager-grpc port: 9095 targetPort: 9095 + - name: alertmanager-gossip-ring + port: 7946 + targetPort: 7946 selector: name: alertmanager --- @@ -337,41 +89,14 @@ spec: - name: compactor-grpc port: 9095 targetPort: 9095 + - name: compactor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: compactor --- apiVersion: v1 kind: Service -metadata: - labels: - name: consul - name: consul - namespace: default -spec: - ports: - - name: consul-server - port: 8300 - targetPort: 8300 - - name: consul-serf - port: 8301 - targetPort: 8301 - - name: consul-client - port: 8400 - targetPort: 8400 - - name: consul-api - port: 8500 - targetPort: 8500 - - name: statsd-exporter-http-metrics - port: 8000 - targetPort: 8000 - - name: consul-exporter-http-metrics - port: 9107 - targetPort: 9107 - selector: - name: consul ---- -apiVersion: v1 -kind: Service metadata: labels: name: distributor @@ -386,11 +111,29 @@ spec: - name: distributor-grpc port: 9095 targetPort: 9095 + - name: distributor-gossip-ring + port: 7946 + targetPort: 7946 selector: name: distributor --- apiVersion: v1 kind: Service +metadata: + name: gossip-ring + namespace: default +spec: + clusterIP: None + ports: + - name: gossip-ring + port: 7946 + protocol: TCP + targetPort: 7946 + selector: + gossip_ring_member: "true" +--- +apiVersion: v1 +kind: Service metadata: labels: name: ingester @@ -404,6 +147,9 @@ spec: - name: ingester-grpc port: 9095 targetPort: 9095 + - name: ingester-gossip-ring + port: 7946 + targetPort: 7946 selector: name: ingester --- @@ -498,6 +244,9 @@ spec: - name: querier-grpc port: 9095 targetPort: 9095 + - name: querier-gossip-ring + port: 7946 + targetPort: 7946 selector: name: querier --- @@ -610,136 +359,14 @@ spec: - name: store-gateway-grpc port: 9095 targetPort: 9095 + - name: store-gateway-gossip-ring + port: 7946 + targetPort: 7946 selector: name: store-gateway --- apiVersion: apps/v1 kind: Deployment -metadata: - name: consul - namespace: default -spec: - minReadySeconds: 10 - replicas: 1 - revisionHistoryLimit: 10 - selector: - matchLabels: - name: consul - template: - metadata: - annotations: - consul-hash: e56ef6821a3557604caccaf6d5820239 - labels: - name: consul - spec: - affinity: - podAntiAffinity: - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - name: consul - topologyKey: kubernetes.io/hostname - - labelSelector: - matchLabels: - name: ingester - namespaces: - - default - topologyKey: kubernetes.io/hostname - containers: - - args: - - agent - - -ui - - -server - - -client=0.0.0.0 - - -config-file=/etc/config/consul-config.json - - -bootstrap-expect=1 - - -ui-content-path=/default/consul/ - env: - - name: CHECKPOINT_DISABLE - value: "1" - image: consul:1.5.3 - imagePullPolicy: IfNotPresent - name: consul - ports: - - containerPort: 8300 - name: server - - containerPort: 8301 - name: serf - - containerPort: 8400 - name: client - - containerPort: 8500 - name: api - resources: - requests: - cpu: "4" - memory: 4Gi - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --namespace=$(POD_NAMESPACE) - - --pod-name=$(POD_NAME) - env: - - name: POD_NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace 
- - name: POD_NAME - valueFrom: - fieldRef: - fieldPath: metadata.name - image: weaveworks/consul-sidekick:master-f18ad13 - imagePullPolicy: IfNotPresent - name: sidekick - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --web.listen-address=:8000 - - --statsd.mapping-config=/etc/config/mapping - image: prom/statsd-exporter:v0.12.2 - imagePullPolicy: IfNotPresent - name: statsd-exporter - ports: - - containerPort: 8000 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - - args: - - --consul.server=localhost:8500 - - --web.listen-address=:9107 - - --consul.timeout=1s - - --no-consul.health-summary - - --consul.allow_stale - image: prom/consul-exporter:v0.5.0 - imagePullPolicy: IfNotPresent - name: consul-exporter - ports: - - containerPort: 9107 - name: http-metrics - volumeMounts: - - mountPath: /etc/config - name: consul - - mountPath: /consul/data/ - name: data - serviceAccount: consul-sidekick - volumes: - - configMap: - name: consul - name: consul - - emptyDir: - medium: Memory - name: data ---- -apiVersion: apps/v1 -kind: Deployment metadata: name: distributor namespace: default @@ -757,6 +384,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: distributor spec: affinity: @@ -777,15 +405,16 @@ spec: - -distributor.ingestion-burst-size=200000 - -distributor.ingestion-rate-limit=10000 - -distributor.ingestion-tenant-shard-size=3 - - -distributor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -distributor.ring.prefix= - - -distributor.ring.store=consul - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 + - -distributor.ring.store=memberlist - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.max-connection-age=2m - -server.grpc.keepalive.max-connection-age-grace=5m @@ -802,6 +431,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -841,6 +472,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: querier spec: affinity: @@ -861,12 +493,14 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 - -querier.max-concurrent=8 - -querier.scheduler-address=query-scheduler-discovery.default.svc.cluster.local:9095 @@ -876,10 +510,9 @@ spec: - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - -server.http-write-timeout=1m - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - 
-store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.tenant-shard-size=3 - -store.max-query-length=768h - -target=querier @@ -894,6 +527,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1070,6 +705,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ruler spec: affinity: @@ -1091,29 +727,29 @@ spec: - -blocks-storage.gcs.bucket-name=blocks-bucket - -distributor.health-check-ingesters=true - -distributor.ingestion-tenant-shard-size=3 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.shuffle-sharding-ingesters-enabled=false - -ruler-storage.backend=gcs - -ruler-storage.gcs.bucket-name=rules-bucket - -ruler.alertmanager-url=http://alertmanager.default.svc.cluster.local/alertmanager - -ruler.max-rule-groups-per-tenant=35 - -ruler.max-rules-per-rule-group=20 - - -ruler.ring.consul.hostname=consul.default.svc.cluster.local:8500 - - -ruler.ring.store=consul + - -ruler.ring.store=memberlist - -ruler.rule-path=/rules - -ruler.tenant-shard-size=2 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.tenant-shard-size=3 - -store.max-query-length=768h - -target=ruler @@ -1163,17 +799,20 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: alertmanager spec: containers: - args: - -alertmanager-storage.backend=gcs - -alertmanager-storage.gcs.bucket-name=alerts-bucket - - -alertmanager.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -alertmanager.sharding-ring.replication-factor=3 - - -alertmanager.sharding-ring.store=consul + - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1192,6 +831,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1247,6 +888,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: compactor spec: containers: @@ -1263,13 +905,15 @@ spec: - -compactor.deletion-delay=2h - -compactor.max-closing-blocks-concurrency=2 - -compactor.max-opening-blocks-concurrency=4 - - -compactor.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -compactor.ring.prefix= - - -compactor.ring.store=consul + - -compactor.ring.store=memberlist 
- -compactor.ring.wait-stability-min-duration=1m - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true @@ -1283,6 +927,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1339,6 +985,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: ingester spec: affinity: @@ -1359,16 +1006,18 @@ spec: - -distributor.ingestion-tenant-shard-size=3 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 - -ingester.ring.heartbeat-period=15s - -ingester.ring.heartbeat-timeout=10m - -ingester.ring.num-tokens=512 - -ingester.ring.prefix= - -ingester.ring.readiness-check-ring-health=false - -ingester.ring.replication-factor=3 - - -ingester.ring.store=consul + - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc-max-concurrent-streams=10000 - -server.grpc.keepalive.min-time-between-pings=10s @@ -1383,6 +1032,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready @@ -1651,6 +1302,7 @@ spec: template: metadata: labels: + gossip_ring_member: "true" name: store-gateway spec: affinity: @@ -1688,14 +1340,16 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket + - -memberlist.abort-if-join-fails=false + - -memberlist.bind-port=7946 + - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml - -server.grpc.keepalive.min-time-between-pings=10s - -server.grpc.keepalive.ping-without-stream-allowed=true - -server.http-listen-port=8080 - - -store-gateway.sharding-ring.consul.hostname=consul.default.svc.cluster.local:8500 - -store-gateway.sharding-ring.prefix= - -store-gateway.sharding-ring.replication-factor=3 - - -store-gateway.sharding-ring.store=consul + - -store-gateway.sharding-ring.store=memberlist - -store-gateway.sharding-ring.tokens-file-path=/data/tokens - -store-gateway.sharding-ring.wait-stability-min-duration=1m - -store-gateway.tenant-shard-size=3 @@ -1708,6 +1362,8 @@ spec: name: http-metrics - containerPort: 9095 name: grpc + - containerPort: 7946 + name: gossip-ring readinessProbe: httpGet: path: /ready diff --git a/operations/mimir/memberlist.libsonnet b/operations/mimir/memberlist.libsonnet index 514a86cca2..e7ee7ad558 100644 --- a/operations/mimir/memberlist.libsonnet +++ b/operations/mimir/memberlist.libsonnet @@ -21,17 +21,30 @@ memberlist_ring_enabled: true, // Migrating from consul to memberlist is a multi-step process: - // 1) Enable multikv_migration_enabled, with primary=consul, secondary=memberlist, and multikv_mirror_enabled=false, restart components. 
+ // + // 1) Enable memberlist_ring_enabled=true and multikv_migration_enabled=true, restart components. + // // 2) Set multikv_mirror_enabled=true. This doesn't require restart. - // 3) Swap multikv_primary and multikv_secondary, ie. multikv_primary=memberlist, multikv_secondary=consul. This doesn't require restart. - // 4) Set multikv_migration_enabled=false and multikv_migration_teardown=true. This requires a restart, but components will now use only memberlist. - // 5) Set multikv_migration_teardown=false. This doesn't require a restart. - multikv_migration_enabled: false, - multikv_migration_teardown: false, + // + // 3) Set multikv_switch_primary_secondary=true. This doesn't require restart. From this point on components use memberlist as primary KV store! + // + // 4) Set multikv_mirror_enabled=false. Stop mirroring writes to Consul. Doesn't require restart. + // + // 5) Set multikv_migration_enabled=false and multikv_migration_teardown=true. This requires a restart. + // After restart components will only use memberlist. Using multikv_migration_teardown=true guarantees that runtime config + // with multi KV configuration is preserved for components that haven't restarted yet. + // + // Note: this also removes Consul. That's fine, because it's not used anymore (mirroring to it was disabled in step 4). + // + // 6) Set multikv_migration_teardown=false. This step removes runtime configuration for multi KV. It doesn't require a restart of components. + multikv_migration_enabled: false, // Enable multi KV. + multikv_migration_teardown: false, // If multikv_migration_enabled=false and multikv_migration_teardown=true, runtime configuration for multi KV is preserved. + multikv_switch_primary_secondary: false, // Switch primary and secondary KV stores in runtime configuration for multi KV. + multikv_mirror_enabled: false, // Enable mirroring of writes from primary to secondary KV store. + + // Don't change these values during migration. Use multikv_switch_primary_secondary instead. multikv_primary: 'consul', multikv_secondary: 'memberlist', - multikv_switch_primary_secondary: false, - multikv_mirror_enabled: false, // Use memberlist only. This works fine on already-migrated clusters. // To do a migration from Consul to memberlist, multi kv storage needs to be used (See below). From b750cd60cc98743f46e417788e78f7d9c4bdf5e6 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 21 Jun 2022 17:46:53 +0200 Subject: [PATCH 23/63] Docs: fix reference to the mimir Helm chart (#2167) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- docs/sources/operators-guide/deploying-grafana-mimir/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/operators-guide/deploying-grafana-mimir/_index.md b/docs/sources/operators-guide/deploying-grafana-mimir/_index.md index 0ac7e8ba99..47ce4d8237 100644 --- a/docs/sources/operators-guide/deploying-grafana-mimir/_index.md +++ b/docs/sources/operators-guide/deploying-grafana-mimir/_index.md @@ -14,7 +14,7 @@ You can use Helm or Tanka to deploy Grafana Mimir on Kubernetes. ## Helm -A [mimir-distributed](https://github.com/grafana/helm-charts/tree/main/charts/mimir-distributed) Helm chart that deploys Grafana Mimir in [microservices mode]({{< relref "../architecture/deployment-modes/index.md#microservices-mode" >}}) is available in the grafana/helm-charts repo. 
+A [mimir-distributed](https://github.com/grafana/mimir/tree/main/operations/helm/charts/mimir-distributed) Helm chart that deploys Grafana Mimir in [microservices mode]({{< relref "../architecture/deployment-modes/index.md#microservices-mode" >}}) is available in the [grafana/helm-charts](https://grafana.github.io/helm-charts/) Helm repository. ## Jsonnet and Tanka From fb6462b92f43311e82900dcf60793823acaaa899 Mon Sep 17 00:00:00 2001 From: gonzalez Date: Tue, 21 Jun 2022 17:44:38 -0400 Subject: [PATCH 24/63] Add container security context (#2112) * Adding privileged security context update from values file option * fixing check for the non-nilness of all intermediate YAML path elements * update changelog * fixing admin-api change * updating values file from main * updated changelog to follow order * retrieve the full container security context from the values file * fixing trailing spaces * fixing trailing spaces * fixing trailing spaces * fixing trailing spaces . . --- .../charts/mimir-distributed/CHANGELOG.md | 3 +- .../templates/admin-api/admin-api-dep.yaml | 2 +- .../alertmanager/alertmanager-dep.yaml | 2 +- .../compactor/compactor-statefulset.yaml | 2 +- .../distributor/distributor-dep.yaml | 2 +- .../templates/gateway/gateway-dep.yaml | 2 +- .../ingester/ingester-statefulset.yaml | 2 +- .../overrides-exporter-dep.yaml | 2 +- .../templates/querier/querier-dep.yaml | 2 +- .../query-frontend/query-frontend-dep.yaml | 2 +- .../templates/ruler/ruler-dep.yaml | 2 +- .../store-gateway-statefulset.yaml | 2 +- .../helm/charts/mimir-distributed/values.yaml | 42 ++++++++++++++++++- 13 files changed, 54 insertions(+), 13 deletions(-) diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index 9462e5b320..38aa634820 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -12,7 +12,6 @@ Entries should be ordered as follows: Entries should include a reference to the Pull Request that introduced the change. ## main / unreleased - * [CHANGE] Enable multi-tenancy by default. This means `multitenancy_enabled` is now `true` for both Mimir and Enterprise Metrics. Nginx will inject `X-Scope-OrgID=anonymous` header if the header is not present, ensuring backwards compatibility. #2117 * [CHANGE] Change default value for `blocks_storage.bucket_store.chunks_cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 * [FEATURE] Add `mimir-continuous-test` in smoke-test mode. Use `helm test` to run a smoke test of the read + write path. @@ -27,6 +26,8 @@ Entries should include a reference to the Pull Request that introduced the chang * [ENHANCEMENT] Add `extraEnvFrom` capability to all Mimir services to enable injecting secrets via environment variables. #2017 * [ENHANCEMENT] Enable `-config.expand-env=true` option in all Mimir services to be able to take secrets/settings from the environment and inject them into the Mimir configuration file. #2017 * [ENHANCEMENT] Add a simple test for enterprise installation #2027 +* [ENHANCEMENT] Check for the containerSecurityContext in values file. 
#2112 + ## 2.1.0-beta.7 diff --git a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml index e5571d0e1f..b9e89b0d70 100644 --- a/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/admin-api/admin-api-dep.yaml @@ -83,7 +83,7 @@ spec: resources: {{- toYaml .Values.admin_api.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.admin_api.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{ toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml index f48f774c28..979e46ff1d 100644 --- a/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/alertmanager/alertmanager-dep.yaml @@ -86,7 +86,7 @@ spec: resources: {{- toYaml .Values.alertmanager.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.alertmanager.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml index e2f7562b00..e1c3966abc 100644 --- a/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/compactor/compactor-statefulset.yaml @@ -135,7 +135,7 @@ spec: resources: {{- toYaml .Values.compactor.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.compactor.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml index 74d96b1bee..c6faeef823 100644 --- a/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/distributor/distributor-dep.yaml @@ -82,7 +82,7 @@ spec: resources: {{- toYaml .Values.distributor.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.distributor.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml index 342d291b78..c86079234c 100644 --- a/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/gateway/gateway-dep.yaml @@ -75,7 +75,7 @@ spec: resources: {{- toYaml .Values.gateway.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.gateway.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{ toYaml . 
| nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml index 377549e8b7..f21b728e3f 100644 --- a/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ingester/ingester-statefulset.yaml @@ -139,7 +139,7 @@ spec: resources: {{- toYaml .Values.ingester.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.ingester.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml index 0b61d974b5..4b760a1257 100644 --- a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-dep.yaml @@ -80,7 +80,7 @@ spec: resources: {{- toYaml .Values.overrides_exporter.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.overrides_exporter.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{ toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml index efa435bf0f..4aadb73859 100644 --- a/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/querier/querier-dep.yaml @@ -81,7 +81,7 @@ spec: resources: {{- toYaml .Values.querier.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.querier.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml index e033a3f91f..fc5d1319db 100644 --- a/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/query-frontend/query-frontend-dep.yaml @@ -81,7 +81,7 @@ spec: resources: {{- toYaml .Values.query_frontend.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.query_frontend.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml index 345108cb77..ef7e976850 100644 --- a/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml +++ b/operations/helm/charts/mimir-distributed/templates/ruler/ruler-dep.yaml @@ -83,7 +83,7 @@ spec: resources: {{- toYaml .Values.ruler.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.ruler.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . 
| nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml index df0148b7e6..db08407b22 100644 --- a/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml +++ b/operations/helm/charts/mimir-distributed/templates/store-gateway/store-gateway-statefulset.yaml @@ -135,7 +135,7 @@ spec: resources: {{- toYaml .Values.store_gateway.resources | nindent 12 }} securityContext: - readOnlyRootFilesystem: true + {{- toYaml .Values.store_gateway.containerSecurityContext | nindent 12 }} env: {{- with .Values.global.extraEnv }} {{- toYaml . | nindent 12 }} diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index 58c7087bfe..2b8c06845a 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -464,6 +464,10 @@ distributor: securityContext: {} + # -- The SecurityContext for distributor containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate rollingUpdate: @@ -555,6 +559,10 @@ ingester: securityContext: {} + # -- The SecurityContext for ingester containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate rollingUpdate: @@ -601,6 +609,10 @@ overrides_exporter: securityContext: {} + # -- The SecurityContext for overrides_exporter containers + containerSecurityContext: + readOnlyRootFilesystem: true + extraArgs: {} persistence: @@ -671,6 +683,10 @@ ruler: securityContext: {} + # -- The SecurityContext for ruler containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate rollingUpdate: @@ -737,6 +753,10 @@ querier: securityContext: {} + # -- The SecurityContext for querier containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate rollingUpdate: @@ -803,6 +823,10 @@ query_frontend: securityContext: {} + # -- The SecurityContext for query_frontend containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate rollingUpdate: @@ -902,6 +926,10 @@ store_gateway: securityContext: {} + # -- The SecurityContext for store_gateway containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate @@ -981,7 +1009,6 @@ compactor: # subPath: '' - # compactor data Persistent Volume Storage Class # If defined, storageClassName: # If set to "-", storageClassName: "", which disables dynamic provisioning @@ -999,6 +1026,10 @@ compactor: securityContext: {} + # -- The SecurityContext for compactor containers + containerSecurityContext: + readOnlyRootFilesystem: true + strategy: type: RollingUpdate @@ -1506,6 +1537,10 @@ admin_api: securityContext: {} + # -- The SecurityContext for admin_api containers + containerSecurityContext: + readOnlyRootFilesystem: true + extraArgs: {} persistence: @@ -1561,6 +1596,11 @@ gateway: affinity: {} securityContext: {} + + # -- The SecurityContext for gateway containers + containerSecurityContext: + readOnlyRootFilesystem: true + initContainers: [] extraArgs: {} From c0bc756283c851da52b762bf1d97a7d0c29eca65 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Wed, 22 Jun 2022 07:54:04 +0200 Subject: [PATCH 25/63] Update dskit. 
Remove memberlist.abort-if-join-fails=false in jsonnet and example code (#2168) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update dskit. Remove memberlist.abort-if-join-fails=false in jsonnet and example code. Signed-off-by: Peter Štibraný * CHANGELOG.md Signed-off-by: Peter Štibraný --- CHANGELOG.md | 1 + cmd/mimir/config-descriptor.json | 2 +- cmd/mimir/help-all.txt.tmpl | 2 +- cmd/mimir/help.txt.tmpl | 2 +- .../tsdb-blocks-storage-s3/config/mimir.yaml | 1 - .../single-process-config-blocks-gossip-1.yaml | 1 - .../single-process-config-blocks-gossip-2.yaml | 1 - .../reference-configuration-parameters/index.md | 2 +- go.mod | 2 +- go.sum | 4 ++-- .../mimir-tests/test-autoscaling-generated.yaml | 7 ------- operations/mimir-tests/test-defaults-generated.yaml | 5 ----- .../test-disable-chunk-streaming-generated.yaml | 7 ------- .../test-memberlist-migration-step-1-generated.yaml | 7 ------- .../test-memberlist-migration-step-2-generated.yaml | 7 ------- .../test-memberlist-migration-step-3-generated.yaml | 7 ------- .../test-memberlist-migration-step-4-generated.yaml | 7 ------- .../test-memberlist-migration-step-5-generated.yaml | 7 ------- ...memberlist-migration-step-6-final-generated.yaml | 7 ------- .../mimir-tests/test-multi-zone-generated.yaml | 11 ----------- ...multi-zone-with-ongoing-migration-generated.yaml | 13 ------------- .../mimir-tests/test-query-sharding-generated.yaml | 7 ------- .../test-ruler-remote-evaluation-generated.yaml | 8 -------- ...ruler-remote-evaluation-migration-generated.yaml | 8 -------- .../test-shuffle-sharding-generated.yaml | 7 ------- ...uffle-sharding-read-path-disabled-generated.yaml | 7 ------- .../mimir-tests/test-storage-azure-generated.yaml | 7 ------- .../mimir-tests/test-storage-gcs-generated.yaml | 7 ------- .../mimir-tests/test-storage-s3-generated.yaml | 7 ------- operations/mimir/memberlist.libsonnet | 1 - .../dskit/kv/memberlist/memberlist_client.go | 2 +- vendor/modules.txt | 2 +- 32 files changed, 10 insertions(+), 156 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 6d308e5007..faad0c5e88 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -16,6 +16,7 @@ * [CHANGE] Ingester: deprecated `-ingester.ring.join-after`. Mimir now behaves as this setting is always set to 0s. This configuration option will be removed in Mimir 2.4.0. #1965 * [CHANGE] Blocks uploaded by ingester no longer contain `__org_id__` label. Compactor now ignores this label and will compact blocks with and without this label together. `mimirconvert` tool will remove the label from blocks as "unknown" label. #1972 * [CHANGE] Querier: deprecated `-querier.shuffle-sharding-ingesters-lookback-period`, instead adding `-querier.shuffle-sharding-ingesters-enabled` to enable or disable shuffle sharding on the read path. The value of `-querier.query-ingesters-within` is now used internally for shuffle sharding lookback. #2110 +* [CHANGE] Memberlist: `-memberlist.abort-if-join-fails` now defaults to false. Previously it defaulted to true. 
#2168 * [ENHANCEMENT] Distributor: Added limit to prevent tenants from sending excessive number of requests: #1843 * The following CLI flags (and their respective YAML config options) have been added: * `-distributor.request-rate-limit` diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 564721a0fb..4079f6173d 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -9746,7 +9746,7 @@ "required": false, "desc": "If this node fails to join memberlist cluster, abort.", "fieldValue": null, - "fieldDefaultValue": true, + "fieldDefaultValue": false, "fieldFlag": "memberlist.abort-if-join-fails", "fieldType": "boolean" }, diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 5e98b2b216..425bbc1833 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -956,7 +956,7 @@ Usage of ./cmd/mimir/mimir: -mem-ballast-size-bytes int Size of memory ballast to allocate. -memberlist.abort-if-join-fails - If this node fails to join memberlist cluster, abort. (default true) + If this node fails to join memberlist cluster, abort. -memberlist.advertise-addr string Gossip address to advertise to other members in the cluster. Used for NAT traversal. -memberlist.advertise-port int diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl index 75e4b5e6fe..f7e9f0e626 100644 --- a/cmd/mimir/help.txt.tmpl +++ b/cmd/mimir/help.txt.tmpl @@ -300,7 +300,7 @@ Usage of ./cmd/mimir/mimir: -log.level value Only log messages with the given severity or above. Valid levels: [debug, info, warn, error] (default info) -memberlist.abort-if-join-fails - If this node fails to join memberlist cluster, abort. (default true) + If this node fails to join memberlist cluster, abort. -memberlist.advertise-addr string Gossip address to advertise to other members in the cluster. Used for NAT traversal. 
-memberlist.advertise-port int diff --git a/development/tsdb-blocks-storage-s3/config/mimir.yaml b/development/tsdb-blocks-storage-s3/config/mimir.yaml index defad27077..79af7dccfe 100644 --- a/development/tsdb-blocks-storage-s3/config/mimir.yaml +++ b/development/tsdb-blocks-storage-s3/config/mimir.yaml @@ -32,7 +32,6 @@ ingester: memberlist: join_members: - distributor:10001 - abort_if_cluster_join_fails: false rejoin_interval: 10s blocks_storage: diff --git a/docs/configurations/single-process-config-blocks-gossip-1.yaml b/docs/configurations/single-process-config-blocks-gossip-1.yaml index 7cd5d2ff09..a5cf97eb49 100644 --- a/docs/configurations/single-process-config-blocks-gossip-1.yaml +++ b/docs/configurations/single-process-config-blocks-gossip-1.yaml @@ -41,7 +41,6 @@ memberlist: bind_port: 7946 join_members: - localhost:7947 - abort_if_cluster_join_fails: false blocks_storage: tsdb: diff --git a/docs/configurations/single-process-config-blocks-gossip-2.yaml b/docs/configurations/single-process-config-blocks-gossip-2.yaml index fc0b05afc3..900f5a7b08 100644 --- a/docs/configurations/single-process-config-blocks-gossip-2.yaml +++ b/docs/configurations/single-process-config-blocks-gossip-2.yaml @@ -41,7 +41,6 @@ memberlist: bind_port: 7947 join_members: - localhost:7946 - abort_if_cluster_join_fails: false blocks_storage: tsdb: diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index f10b58f845..f973af1977 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -2529,7 +2529,7 @@ The `memberlist` block configures the Gossip memberlist. # If this node fails to join memberlist cluster, abort. # CLI flag: -memberlist.abort-if-join-fails -[abort_if_cluster_join_fails: | default = true] +[abort_if_cluster_join_fails: | default = false] # (advanced) If not 0, how often to rejoin the cluster. Occasional rejoin can # help to fix the cluster split issue, and is harmless otherwise. 
For example diff --git a/go.mod b/go.mod index 2478090625..708519af5c 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/gopacket v1.1.19 github.com/gorilla/mux v1.8.0 - github.com/grafana/dskit v0.0.0-20220613090928-ebb5c6de233d + github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755 github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe github.com/hashicorp/golang-lru v0.5.4 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index ba771e5832..d71c8f8f5a 100644 --- a/go.sum +++ b/go.sum @@ -852,8 +852,8 @@ github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4= github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0= github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM= github.com/grafana/dskit v0.0.0-20220112093026-95274ccc858d/go.mod h1:M0/dlftwBvH7+hdNNpjMa/CUXD7gsew67mbkCuDlFXE= -github.com/grafana/dskit v0.0.0-20220613090928-ebb5c6de233d h1:cHmYkgZr5vdkxt7EbzFIwdm9fS/0E3wyTMa1g2vezLg= -github.com/grafana/dskit v0.0.0-20220613090928-ebb5c6de233d/go.mod h1:9It/K30QPyj/FuTqBb/SYnaS4/BJCP5YL4SRfXB7dG0= +github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755 h1:94SFQmFDzMUD5h5lmdIPRpDbLMTEWiKGf9WXa0uQ7Ik= +github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755/go.mod h1:9It/K30QPyj/FuTqBb/SYnaS4/BJCP5YL4SRfXB7dG0= github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe h1:mxrRWDjKtob43xF9nEhJthdtCzX35/800Sk7nE//YHQ= github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe/go.mod h1:+26VJWpczg2OU3D0537acnHSHzhJORpxOs6F+M27tZo= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167 h1:PgEQkGHR4YimSCEGT5IoswN9gJKZDVskf+he6UClCLw= diff --git a/operations/mimir-tests/test-autoscaling-generated.yaml b/operations/mimir-tests/test-autoscaling-generated.yaml index 3fa65be848..02394993f4 100644 --- a/operations/mimir-tests/test-autoscaling-generated.yaml +++ b/operations/mimir-tests/test-autoscaling-generated.yaml @@ -411,7 +411,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -496,7 +495,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -724,7 +722,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -800,7 +797,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -901,7 +897,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1004,7 +999,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1329,7 +1323,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-defaults-generated.yaml b/operations/mimir-tests/test-defaults-generated.yaml index 2d94c3455c..8d106ffedc 100644 --- a/operations/mimir-tests/test-defaults-generated.yaml +++ b/operations/mimir-tests/test-defaults-generated.yaml @@ -320,7 +320,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -406,7 +405,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-address=query-frontend-discovery.default.svc.cluster.local:9095 @@ -566,7 +564,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -669,7 +666,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -994,7 +990,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml b/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml index 761bc2582a..584621f306 100644 --- a/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml +++ b/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml @@ -412,7 +412,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -498,7 +497,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - 
-memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -726,7 +724,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -802,7 +799,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -903,7 +899,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1006,7 +1001,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1331,7 +1325,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml index b17ead6d68..f3f47d664d 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml @@ -826,7 +826,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -915,7 +914,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1149,7 +1147,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -1234,7 +1231,6 @@ spec: - -alertmanager.sharding-ring.store=multi - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1338,7 +1334,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - 
-compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1444,7 +1439,6 @@ spec: - -ingester.ring.store=multi - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1769,7 +1763,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml index f2ae658fb8..c3d8de22bc 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml @@ -826,7 +826,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -915,7 +914,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1149,7 +1147,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -1234,7 +1231,6 @@ spec: - -alertmanager.sharding-ring.store=multi - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1338,7 +1334,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1444,7 +1439,6 @@ spec: - -ingester.ring.store=multi - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1769,7 +1763,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git 
a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml index da509001a8..4bd9945b84 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml @@ -826,7 +826,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -915,7 +914,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1149,7 +1147,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -1234,7 +1231,6 @@ spec: - -alertmanager.sharding-ring.store=multi - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1338,7 +1334,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1444,7 +1439,6 @@ spec: - -ingester.ring.store=multi - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1769,7 +1763,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml index 97a59bdd54..8cfe3a0da9 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml @@ -826,7 +826,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -915,7 +914,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - 
-querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1149,7 +1147,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=multi - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -1234,7 +1231,6 @@ spec: - -alertmanager.sharding-ring.store=multi - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1338,7 +1334,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1444,7 +1439,6 @@ spec: - -ingester.ring.store=multi - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1769,7 +1763,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml index a08e590551..2efbaee732 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml @@ -414,7 +414,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -500,7 +499,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -728,7 +726,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -804,7 +801,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -905,7 +901,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - 
-memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1008,7 +1003,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1333,7 +1327,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml index 6ea4883c89..f2fb7d9303 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml @@ -411,7 +411,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -497,7 +496,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -725,7 +723,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -801,7 +798,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -902,7 +898,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1005,7 +1000,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1330,7 +1324,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-multi-zone-generated.yaml 
b/operations/mimir-tests/test-multi-zone-generated.yaml index 90b24fa612..a3413ec0bc 100644 --- a/operations/mimir-tests/test-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-generated.yaml @@ -574,7 +574,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -661,7 +660,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -936,7 +934,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -1013,7 +1010,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1114,7 +1110,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1230,7 +1225,6 @@ spec: - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1347,7 +1341,6 @@ spec: - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1464,7 +1457,6 @@ spec: - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1800,7 +1792,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1931,7 +1922,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - 
-runtime-config.file=/etc/mimir/overrides.yaml @@ -2062,7 +2052,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml index 86b39b2e87..b7bac675a6 100644 --- a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml @@ -642,7 +642,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -729,7 +728,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1004,7 +1002,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -1081,7 +1078,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1182,7 +1178,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1285,7 +1280,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1402,7 +1396,6 @@ spec: - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1519,7 +1512,6 @@ spec: - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1636,7 +1628,6 @@ spec: - -ingester.ring.tokens-file-path=/data/tokens - 
-ingester.ring.unregister-on-shutdown=true - -ingester.ring.zone-awareness-enabled=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1961,7 +1952,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -2089,7 +2079,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -2220,7 +2209,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -2351,7 +2339,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-query-sharding-generated.yaml b/operations/mimir-tests/test-query-sharding-generated.yaml index cfd3fa5f4b..725554697c 100644 --- a/operations/mimir-tests/test-query-sharding-generated.yaml +++ b/operations/mimir-tests/test-query-sharding-generated.yaml @@ -411,7 +411,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -497,7 +496,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=419430400 @@ -730,7 +728,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -806,7 +803,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -907,7 +903,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - 
-runtime-config.file=/etc/mimir/overrides.yaml @@ -1010,7 +1005,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1335,7 +1329,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml index 94e2276f6f..9ea97b05c3 100644 --- a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml +++ b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml @@ -489,7 +489,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -575,7 +574,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -803,7 +801,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -897,7 +894,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1105,7 +1101,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1206,7 +1201,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1309,7 +1303,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1634,7 +1627,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - 
-memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml index 1fff2c5d54..e29d854f01 100644 --- a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml +++ b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml @@ -489,7 +489,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -575,7 +574,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -803,7 +801,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -896,7 +893,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -1104,7 +1100,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1205,7 +1200,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1308,7 +1302,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1633,7 +1626,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-shuffle-sharding-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-generated.yaml index 09644948d1..a98010d6ff 100644 --- a/operations/mimir-tests/test-shuffle-sharding-generated.yaml +++ b/operations/mimir-tests/test-shuffle-sharding-generated.yaml @@ -412,7 +412,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - 
-mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -499,7 +498,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -731,7 +729,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -809,7 +806,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -910,7 +906,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1014,7 +1009,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1339,7 +1333,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml index be61b1ab60..2db4ec5519 100644 --- a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml +++ b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml @@ -412,7 +412,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -498,7 +497,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -731,7 +729,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.shuffle-sharding-ingesters-enabled=false @@ -810,7 +807,6 @@ spec: - 
-alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -911,7 +907,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1015,7 +1010,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1340,7 +1334,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-storage-azure-generated.yaml b/operations/mimir-tests/test-storage-azure-generated.yaml index a543dd3e59..7986937149 100644 --- a/operations/mimir-tests/test-storage-azure-generated.yaml +++ b/operations/mimir-tests/test-storage-azure-generated.yaml @@ -411,7 +411,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -499,7 +498,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -729,7 +727,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.azure.account-key=rules-account-key @@ -809,7 +806,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -912,7 +908,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1017,7 +1012,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - 
-runtime-config.file=/etc/mimir/overrides.yaml @@ -1344,7 +1338,6 @@ spec: - -blocks-storage.bucket-store.metadata-cache.memcached.max-item-size=1048576 - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-storage-gcs-generated.yaml b/operations/mimir-tests/test-storage-gcs-generated.yaml index 6ea4883c89..f2fb7d9303 100644 --- a/operations/mimir-tests/test-storage-gcs-generated.yaml +++ b/operations/mimir-tests/test-storage-gcs-generated.yaml @@ -411,7 +411,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -497,7 +496,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -725,7 +723,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=gcs @@ -801,7 +798,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -902,7 +898,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1005,7 +1000,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1330,7 +1324,6 @@ spec: - -blocks-storage.bucket-store.sync-dir=/data/tsdb - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.gcs.bucket-name=blocks-bucket - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir-tests/test-storage-s3-generated.yaml b/operations/mimir-tests/test-storage-s3-generated.yaml index e20001b6a3..9e5cf03d52 100644 --- a/operations/mimir-tests/test-storage-s3-generated.yaml +++ b/operations/mimir-tests/test-storage-s3-generated.yaml @@ -411,7 +411,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=1073741824 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 
- -runtime-config.file=/etc/mimir/overrides.yaml @@ -498,7 +497,6 @@ spec: - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - -mem-ballast-size-bytes=268435456 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -querier.frontend-client.grpc-max-send-msg-size=104857600 @@ -727,7 +725,6 @@ spec: - -ingester.ring.prefix= - -ingester.ring.replication-factor=3 - -ingester.ring.store=memberlist - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -ruler-storage.backend=s3 @@ -806,7 +803,6 @@ spec: - -alertmanager.sharding-ring.store=memberlist - -alertmanager.storage.path=/data - -alertmanager.web.external-url=http://test/alertmanager - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -908,7 +904,6 @@ spec: - -compactor.split-and-merge-shards=0 - -compactor.split-groups=1 - -compactor.symbols-flushers-concurrency=4 - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1012,7 +1007,6 @@ spec: - -ingester.ring.store=memberlist - -ingester.ring.tokens-file-path=/data/tokens - -ingester.ring.unregister-on-shutdown=true - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml @@ -1338,7 +1332,6 @@ spec: - -blocks-storage.bucket-store.sync-interval=15m - -blocks-storage.s3.bucket-name=blocks-bucket - -blocks-storage.s3.endpoint=s3.dualstack.us-east-1.amazonaws.com - - -memberlist.abort-if-join-fails=false - -memberlist.bind-port=7946 - -memberlist.join=gossip-ring.default.svc.cluster.local:7946 - -runtime-config.file=/etc/mimir/overrides.yaml diff --git a/operations/mimir/memberlist.libsonnet b/operations/mimir/memberlist.libsonnet index e7ee7ad558..26170f77ef 100644 --- a/operations/mimir/memberlist.libsonnet +++ b/operations/mimir/memberlist.libsonnet @@ -1,6 +1,5 @@ { local memberlistConfig = { - 'memberlist.abort-if-join-fails': false, 'memberlist.bind-port': gossipRingPort, 'memberlist.join': 'gossip-ring.%s.svc.cluster.local:%d' % [$._config.namespace, gossipRingPort], }, diff --git a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go index 57c08926f7..73b9c52cc5 100644 --- a/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go +++ b/vendor/github.com/grafana/dskit/kv/memberlist/memberlist_client.go @@ -180,7 +180,7 @@ func (cfg *KVConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix string) { f.DurationVar(&cfg.MinJoinBackoff, prefix+"memberlist.min-join-backoff", 1*time.Second, "Min backoff duration to join other cluster members.") f.DurationVar(&cfg.MaxJoinBackoff, prefix+"memberlist.max-join-backoff", 1*time.Minute, "Max backoff duration to join other cluster members.") f.IntVar(&cfg.MaxJoinRetries, prefix+"memberlist.max-join-retries", 10, "Max number of retries to join other cluster members.") - f.BoolVar(&cfg.AbortIfJoinFails, prefix+"memberlist.abort-if-join-fails", true, "If this node fails to join memberlist cluster, abort.") + f.BoolVar(&cfg.AbortIfJoinFails, prefix+"memberlist.abort-if-join-fails", 
cfg.AbortIfJoinFails, "If this node fails to join memberlist cluster, abort.") f.DurationVar(&cfg.RejoinInterval, prefix+"memberlist.rejoin-interval", 0, "If not 0, how often to rejoin the cluster. Occasional rejoin can help to fix the cluster split issue, and is harmless otherwise. For example when using only few components as a seed nodes (via -memberlist.join), then it's recommended to use rejoin. If -memberlist.join points to dynamic service that resolves to all gossiping nodes (eg. Kubernetes headless service), then rejoin is not needed.") f.DurationVar(&cfg.LeftIngestersTimeout, prefix+"memberlist.left-ingesters-timeout", 5*time.Minute, "How long to keep LEFT ingesters in the ring.") f.DurationVar(&cfg.LeaveTimeout, prefix+"memberlist.leave-timeout", 5*time.Second, "Timeout for leaving memberlist cluster.") diff --git a/vendor/modules.txt b/vendor/modules.txt index 3e381c959e..a0393cdf54 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -432,7 +432,7 @@ github.com/gosimple/slug # github.com/grafana-tools/sdk v0.0.0-20211220201350-966b3088eec9 => github.com/colega/grafana-tools-sdk v0.0.0-20220323154849-711bca56d13f ## explicit; go 1.13 github.com/grafana-tools/sdk -# github.com/grafana/dskit v0.0.0-20220613090928-ebb5c6de233d +# github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755 ## explicit; go 1.17 github.com/grafana/dskit/backoff github.com/grafana/dskit/concurrency From d97eb6bff9c895dc76db294cf2237cf118445841 Mon Sep 17 00:00:00 2001 From: Eve Meelan <81647476+Eve832@users.noreply.github.com> Date: Wed, 22 Jun 2022 00:30:15 -0700 Subject: [PATCH 26/63] Update scaling-out.md (#2170) * Update scaling-out.md Some typos * Update docs/sources/operators-guide/running-production-environment/scaling-out.md Co-authored-by: Nick Pillitteri <56quarters@users.noreply.github.com> Co-authored-by: Ursula Kallio --- .../running-production-environment/scaling-out.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/sources/operators-guide/running-production-environment/scaling-out.md b/docs/sources/operators-guide/running-production-environment/scaling-out.md index e7951121cd..e4e34a086f 100644 --- a/docs/sources/operators-guide/running-production-environment/scaling-out.md +++ b/docs/sources/operators-guide/running-production-environment/scaling-out.md @@ -8,10 +8,10 @@ weight: 30 # Scaling out Grafana Mimir Grafana Mimir can horizontally scale every component. -Scaling out Grafana Mimir means that to respond to increased load you, can increase the number of replicas of each Grafana Mimir component. +Scaling out Grafana Mimir means that to respond to increased load, you can increase the number of replicas of each Grafana Mimir component. We have designed Grafana Mimir to scale up quickly, safely, and with no manual intervention. -However, be careful when scaling down some of the stateful components as these action can result in writes and reads failures, or receiving partial query results. +However, be careful when scaling down some of the stateful components as these actions can result in writes and reads failures, or partial query results. 
## Monolithic mode From bee820659c179617c75f2b12a900ae1fc91545ab Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Wed, 22 Jun 2022 09:43:26 +0200 Subject: [PATCH 27/63] Documented how to configure queriers autoscaling with Jsonnet (#2128) * Documented how to configure queriers autoscaling with Jsonnet Signed-off-by: Marco Pracucci * Update docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-autoscaling.md Co-authored-by: Oleg Zaytsev * Clarify KEDA is not going to be installed by Mimir jsonnet Signed-off-by: Marco Pracucci * Fixed link Signed-off-by: Marco Pracucci * Apply suggestions from code review Co-authored-by: Ursula Kallio Co-authored-by: Oleg Zaytsev Co-authored-by: Ursula Kallio --- CHANGELOG.md | 1 + .../jsonnet/configuring-autoscaling.md | 102 ++++++++++++++++++ ...guring.md => configuring-low-resources.md} | 9 +- 3 files changed, 107 insertions(+), 5 deletions(-) create mode 100644 docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-autoscaling.md rename docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/{configuring.md => configuring-low-resources.md} (91%) diff --git a/CHANGELOG.md b/CHANGELOG.md index faad0c5e88..365ddf4e5f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -101,6 +101,7 @@ * [ENHANCEMENT] Clarify "Set rule group" API specification. #1869 * [ENHANCEMENT] Published Mimir jsonnet documentation. #2024 * [ENHANCEMENT] Documented required scrape interval for using alerting and recording rules from Mimir jsonnet. #2147 +* [ENHANCEMENT] Documented how to configure queriers’ autoscaling with Jsonnet. #2128 * [BUGFIX] Fixed ruler configuration used in the getting started guide. #2052 * [BUGFIX] Fixed Mimir Alertmanager datasource in Grafana used by "Play with Grafana Mimir" tutorial. #2115 diff --git a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-autoscaling.md b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-autoscaling.md new file mode 100644 index 0000000000..181e949782 --- /dev/null +++ b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-autoscaling.md @@ -0,0 +1,102 @@ +--- +title: "Configuring Grafana Mimir autoscaling with Jsonnet" +menuTitle: "Configuring autoscaling" +description: "Learn how to configure Grafana Mimir autoscaling when using Jsonnet." +weight: 30 +--- + +# Configuring Grafana Mimir autoscaling with Jsonnet + +Mimir Jsonnet supports autoscaling for the following components: + +- [Querier]({{< relref "../../architecture/components/querier.md" >}}) + +Autoscaling, which is based on Prometheus metrics and [KEDA (Kubernetes-based Event Driven Autoscaler)](https://keda.sh), uses Kubernetes’ Horizontal Pod Autoscaler (HPA). + +HPA is not configured directly in Jsonnet; instead, it is created and updated by KEDA. +KEDA is an operator, running in the Kubernetes cluster, which is responsible for simplifying the setup of HPA with custom metrics (Prometheus in our case). + +## How KEDA works + +KEDA is a Kubernetes operator that aims to simplify the wiring between HPA and Prometheus. + +Kubernetes HPA, out of the box, is not capable of autoscaling based on metrics scraped by Prometheus, but it allows you to configure a custom metrics API server that proxies metrics from a datasource (e.g. Prometheus) to Kubernetes. +Setting up the custom metrics API server for Prometheus in a Kubernetes cluster can be a tedious operation, so KEDA offers an operator to set it up automatically.
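To make this concrete, the object KEDA consumes is a `ScaledObject` custom resource, from which the operator creates and maintains the corresponding HPA. The following is a minimal, illustrative sketch only: the resource name, namespace, query, and threshold are assumptions for illustration, not the exact objects generated by the Mimir jsonnet.

```yaml
# Illustrative KEDA ScaledObject (assumed names and values, not generated by the Mimir jsonnet).
apiVersion: keda.sh/v1alpha1
kind: ScaledObject
metadata:
  name: querier # hypothetical name
  namespace: mimir # hypothetical namespace
spec:
  scaleTargetRef:
    name: querier # the Deployment the generated HPA will scale
  minReplicaCount: 10
  maxReplicaCount: 40
  triggers:
    - type: prometheus
      metadata:
        serverAddress: http://prometheus.default:9090/prometheus
        metricName: querier_scaling_metric # arbitrary label for the exposed metric
        # Illustrative query only; pick a metric that reflects load on the target component.
        query: sum(cortex_query_scheduler_inflight_requests{namespace="mimir"})
        threshold: "10"
```

With an object like this in place, KEDA creates an HPA that scales the target Deployment between the configured minimum and maximum replicas based on the Prometheus query; deleting the `ScaledObject` removes that HPA again.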
+KEDA supports proxying metrics for a variety of sources, including Prometheus. + +### KEDA in a nutshell + +- Runs an operator and an external metrics server. +- The metrics server supports proxying for many metric sources, including Prometheus. +- The operator watches for `ScaledObject` custom resource definitions (CRDs), which define the minimum and maximum replicas and the scaling trigger metrics of a Deployment or StatefulSet, and then configures the related HPA resource. You don't create the HPA resource in Kubernetes, but the operator creates it for you whenever a `ScaledObject` CRD is created (and keeps it updated for its whole lifecycle). + +Refer to the [KEDA documentation](https://keda.sh) for more information. + +### What happens if KEDA is unhealthy + +The autoscaling of deployments is always managed by HPA, which is a native Kubernetes feature. +KEDA, as we use it, never changes the number of replicas of Mimir Deployments or StatefulSets. + +However, if KEDA is not running successfully, there are consequences for Mimir autoscaling too: + +- `keda-operator` is down (not critical): changes to `ScaledObject` CRDs will not be reflected in the HPA until the operator gets back online. HPA functionality is not affected. +- `keda-operator-metrics-apiserver` is down (critical): HPA is not able to fetch updated metrics and it will stop scaling the deployment until metrics are available again. The deployment (e.g. queriers) will keep working but, in case of a surge of traffic, HPA will not be able to detect it (because of a lack of metrics) and so will not scale up. + +The [alert `MimirQuerierAutoscalerNotActive`]({{< relref "../../monitoring-grafana-mimir/_index.md" >}}) fires if HPA is unable to scale the deployment for any reason (e.g. unable to scrape metrics from the KEDA metrics API server). + +## How Kubernetes HPA works + +Refer to the Kubernetes [Horizontal Pod Autoscaling](https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/) documentation for a full understanding of how HPA works. + +## How to enable autoscaling + +The following Jsonnet configuration snippet shows an example of how to enable Mimir autoscaling with Jsonnet: + +```jsonnet +local mimir = import 'mimir/mimir.libsonnet'; + +mimir { + _config+:: { + // Enable queriers autoscaling. + autoscaling_querier_enabled: true, + autoscaling_querier_min_replicas: 10, + autoscaling_querier_max_replicas: 40, + autoscaling_prometheus_url: 'http://prometheus.default:9090/prometheus', + } +} +``` + +> **Note**: KEDA will not be installed by Mimir jsonnet. You can follow the [Deploying KEDA](https://keda.sh/docs/latest/deploy/) instructions to install it in your Kubernetes cluster. + +## How to disable autoscaling + +There are two options to disable autoscaling in a Mimir cluster: + +1. Set minimum replicas = maximum replicas. +2. Decommission HPA. + +### Set minimum replicas = maximum replicas + +If KEDA and Kubernetes HPA work correctly but the HPA configuration (metric and threshold) is not giving the expected results (e.g. not scaling up when required), a simple solution to bypass the autoscaling algorithm is to set `autoscaling_querier_min_replicas` and `autoscaling_querier_max_replicas` to the same value. + +### Decommission HPA + +To fully decommission HPA in a Mimir cluster you have to: + +1. Set `autoscaling_querier_enabled: false` +2.
Manually set the expected number of replicas for the given Mimir component + +The following example shows how to disable querier autoscaler and configure querier Deployment with 10 replicas: + +```jsonnet +local k = import 'github.com/grafana/jsonnet-libs/ksonnet-util/kausal.libsonnet'; +local deployment = k.apps.v1.deployment; + +mimir { + _config+:: { + autoscaling_querier_enabled: false, + }, + + querier_deployment+: deployment.mixin.spec.withReplicas(10), +} +``` diff --git a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-low-resources.md similarity index 91% rename from docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md rename to docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-low-resources.md index e40dffa95e..cbbedafbbd 100644 --- a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring.md +++ b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-low-resources.md @@ -1,14 +1,13 @@ --- -title: "Configuring Grafana Mimir deployment with Jsonnet" -menuTitle: "Configuring deployment" +title: "Configuring Grafana Mimir to use low resources with Jsonnet" +menuTitle: "Configuring low resources" description: "Learn how to configure Grafana Mimir when using Jsonnet." weight: 20 --- -# Configuring Grafana Mimir deployment with Jsonnet +# Configuring Grafana Mimir to use low resources with Jsonnet -Notable features of the Mimir Jsonnet are described here in detail. -To learn how to get started, see [Deploying Grafana Mimir with Jsonnet and Tanka]({{< relref "./deploying.md" >}}). +This page describes how to configure Jsonnet to deploy Grafana Mimir in a Kubernetes cluster with low CPU and memory resources available. ## Anti-affinity From 83b3fb61f826168d14ee382a78aaecf66349349c Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Wed, 22 Jun 2022 12:11:31 +0200 Subject: [PATCH 28/63] Mixin: Change `MimirRulerTooManyFailedQueries` alert severity to `critical`. (#2165) --- CHANGELOG.md | 1 + operations/mimir-mixin-compiled/alerts.yaml | 2 +- operations/mimir-mixin/alerts/alerts.libsonnet | 2 +- 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 365ddf4e5f..ac38fcba1a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -53,6 +53,7 @@ * [CHANGE] Dashboards: Expose full image tag in "Mimir / Rollout progress" dashboard's "Pod per version panel." #1932 * [CHANGE] Dashboards: Disabled gateway panels by default, because most users don't have a gateway exposing the metrics expected by Mimir dashboards. You can re-enable it setting `gateway_enabled: true` in the mixin config and recompiling the mixin running `make build-mixin`. #1954 * [CHANGE] Alerts: adapt `MimirFrontendQueriesStuck` and `MimirSchedulerQueriesStuck` to consider ruler query path components. #1949 +* [CHANGE] Alerts: Change `MimirRulerTooManyFailedQueries` severity to `critical`. #2165 * [ENHANCEMENT] Dashboards: Add config option `datasource_regex` to customise the regular expression used to select valid datasources for Mimir dashboards. #1802 * [ENHANCEMENT] Dashboards: Added "Mimir / Remote ruler reads" and "Mimir / Remote ruler reads resources" dashboards. #1911 #1937 * [ENHANCEMENT] Dashboards: Make networking panels work for pods created by the mimir-distributed helm chart. 
#1927 diff --git a/operations/mimir-mixin-compiled/alerts.yaml b/operations/mimir-mixin-compiled/alerts.yaml index 46b42cdfb3..200c07b9e3 100644 --- a/operations/mimir-mixin-compiled/alerts.yaml +++ b/operations/mimir-mixin-compiled/alerts.yaml @@ -321,7 +321,7 @@ groups: ) > 1 for: 5m labels: - severity: warning + severity: critical - alert: MimirRulerMissedEvaluations annotations: message: | diff --git a/operations/mimir-mixin/alerts/alerts.libsonnet b/operations/mimir-mixin/alerts/alerts.libsonnet index e328a5b302..8f7eed2199 100644 --- a/operations/mimir-mixin/alerts/alerts.libsonnet +++ b/operations/mimir-mixin/alerts/alerts.libsonnet @@ -520,7 +520,7 @@ ||| % $._config, 'for': '5m', labels: { - severity: 'warning', + severity: 'critical', }, annotations: { message: ||| From 017a738e9470d70a6d4ac9dd98d30274d12d42df Mon Sep 17 00:00:00 2001 From: Oleg Zaytsev Date: Wed, 22 Jun 2022 16:03:27 +0200 Subject: [PATCH 29/63] Use `dskit/ring.ring.InstanceRegisterDelegate` (#2177) * Bump dskit to 7bb188ccf75 This includes changes from https://github.com/grafana/dskit/pull/177 Signed-off-by: Oleg Zaytsev * Use ring.InstanceRegisterDelegate in alertmanager Signed-off-by: Oleg Zaytsev * Use ring.InstanceRegisterDelegate in ruler Signed-off-by: Oleg Zaytsev * Use ring.InstanceRegisterDelegate in gateway Signed-off-by: Oleg Zaytsev --- go.mod | 2 +- go.sum | 4 +-- pkg/alertmanager/lifecycle.go | 33 ----------------- pkg/alertmanager/multitenant.go | 2 +- pkg/ruler/lifecycle.go | 33 ----------------- pkg/ruler/ruler.go | 2 +- pkg/storegateway/gateway.go | 25 +------------ .../dskit/ring/basic_lifecycler_delegates.go | 35 +++++++++++++++++++ vendor/modules.txt | 2 +- 9 files changed, 42 insertions(+), 96 deletions(-) delete mode 100644 pkg/alertmanager/lifecycle.go delete mode 100644 pkg/ruler/lifecycle.go diff --git a/go.mod b/go.mod index 708519af5c..88af99e74b 100644 --- a/go.mod +++ b/go.mod @@ -16,7 +16,7 @@ require ( github.com/golang/snappy v0.0.4 github.com/google/gopacket v1.1.19 github.com/gorilla/mux v1.8.0 - github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755 + github.com/grafana/dskit v0.0.0-20220622130855-7bb188ccf75a github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe github.com/hashicorp/golang-lru v0.5.4 github.com/json-iterator/go v1.1.12 diff --git a/go.sum b/go.sum index d71c8f8f5a..8fe627d048 100644 --- a/go.sum +++ b/go.sum @@ -852,8 +852,8 @@ github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4= github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0= github.com/grafana/dskit v0.0.0-20211021180445-3bd016e9d7f1/go.mod h1:uPG2nyK4CtgNDmWv7qyzYcdI+S90kHHRWvHnBtEMBXM= github.com/grafana/dskit v0.0.0-20220112093026-95274ccc858d/go.mod h1:M0/dlftwBvH7+hdNNpjMa/CUXD7gsew67mbkCuDlFXE= -github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755 h1:94SFQmFDzMUD5h5lmdIPRpDbLMTEWiKGf9WXa0uQ7Ik= -github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755/go.mod h1:9It/K30QPyj/FuTqBb/SYnaS4/BJCP5YL4SRfXB7dG0= +github.com/grafana/dskit v0.0.0-20220622130855-7bb188ccf75a h1:D9L8ZU8QlXxUb9lQwiNLamt0nJXXETJVmtF7DxSqc9g= +github.com/grafana/dskit v0.0.0-20220622130855-7bb188ccf75a/go.mod h1:9It/K30QPyj/FuTqBb/SYnaS4/BJCP5YL4SRfXB7dG0= github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe h1:mxrRWDjKtob43xF9nEhJthdtCzX35/800Sk7nE//YHQ= github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe/go.mod h1:+26VJWpczg2OU3D0537acnHSHzhJORpxOs6F+M27tZo= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167 
h1:PgEQkGHR4YimSCEGT5IoswN9gJKZDVskf+he6UClCLw= diff --git a/pkg/alertmanager/lifecycle.go b/pkg/alertmanager/lifecycle.go deleted file mode 100644 index ad243f35a3..0000000000 --- a/pkg/alertmanager/lifecycle.go +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// Provenance-includes-location: https://github.com/cortexproject/cortex/blob/master/pkg/alertmanager/lifecycle.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: The Cortex Authors. - -package alertmanager - -import ( - "github.com/grafana/dskit/ring" -) - -func (am *MultitenantAlertmanager) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { - // When we initialize the alertmanager instance in the ring we want to start from - // a clean situation, so whatever is the state we set it JOINING, while we keep existing - // tokens (if any). - var tokens []uint32 - if instanceExists { - tokens = instanceDesc.GetTokens() - } - - _, takenTokens := ringDesc.TokensFor(instanceID) - newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens) - - // Tokens sorting will be enforced by the parent caller. - tokens = append(tokens, newTokens...) - - return ring.JOINING, tokens -} - -func (am *MultitenantAlertmanager) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} -func (am *MultitenantAlertmanager) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (am *MultitenantAlertmanager) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { -} diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 7ba2864544..48ddd8b3cd 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -344,7 +344,7 @@ func createMultitenantAlertmanager(cfg *MultitenantAlertmanagerConfig, fallbackC // Define lifecycler delegates in reverse order (last to be called defined first because they're // chained via "next delegate"). - delegate := ring.BasicLifecyclerDelegate(am) + delegate := ring.BasicLifecyclerDelegate(ring.NewInstanceRegisterDelegate(ring.JOINING, RingNumTokens)) delegate = ring.NewLeaveOnStoppingDelegate(delegate, am.logger) delegate = ring.NewAutoForgetDelegate(am.cfg.ShardingRing.HeartbeatTimeout*ringAutoForgetUnhealthyPeriods, delegate, am.logger) diff --git a/pkg/ruler/lifecycle.go b/pkg/ruler/lifecycle.go deleted file mode 100644 index 99ae7213d8..0000000000 --- a/pkg/ruler/lifecycle.go +++ /dev/null @@ -1,33 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// Provenance-includes-location: https://github.com/cortexproject/cortex/blob/master/pkg/ruler/lifecycle.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: The Cortex Authors. - -package ruler - -import ( - "github.com/grafana/dskit/ring" -) - -func (r *Ruler) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { - // When we initialize the ruler instance in the ring we want to start from - // a clean situation, so whatever is the state we set it ACTIVE, while we keep existing - // tokens (if any). 
- var tokens []uint32 - if instanceExists { - tokens = instanceDesc.GetTokens() - } - - takenTokens := ringDesc.GetTokens() - newTokens := ring.GenerateTokens(r.cfg.Ring.NumTokens-len(tokens), takenTokens) - - // Tokens sorting will be enforced by the parent caller. - tokens = append(tokens, newTokens...) - - return ring.ACTIVE, tokens -} - -func (r *Ruler) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} -func (r *Ruler) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (r *Ruler) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { -} diff --git a/pkg/ruler/ruler.go b/pkg/ruler/ruler.go index 52c39193e0..5806ce57fa 100644 --- a/pkg/ruler/ruler.go +++ b/pkg/ruler/ruler.go @@ -310,7 +310,7 @@ func enableSharding(r *Ruler, ringStore kv.Client) error { // Define lifecycler delegates in reverse order (last to be called defined first because they're // chained via "next delegate"). - delegate := ring.BasicLifecyclerDelegate(r) + delegate := ring.BasicLifecyclerDelegate(ring.NewInstanceRegisterDelegate(ring.ACTIVE, r.cfg.Ring.NumTokens)) delegate = ring.NewLeaveOnStoppingDelegate(delegate, r.logger) delegate = ring.NewAutoForgetDelegate(r.cfg.Ring.HeartbeatTimeout*ringAutoForgetUnhealthyPeriods, delegate, r.logger) diff --git a/pkg/storegateway/gateway.go b/pkg/storegateway/gateway.go index 22a5e90f9a..8a4adcd9e7 100644 --- a/pkg/storegateway/gateway.go +++ b/pkg/storegateway/gateway.go @@ -147,7 +147,7 @@ func newStoreGateway(gatewayCfg Config, storageCfg mimir_tsdb.BlocksStorageConfi // Define lifecycler delegates in reverse order (last to be called defined first because they're // chained via "next delegate"). - delegate := ring.BasicLifecyclerDelegate(g) + delegate := ring.BasicLifecyclerDelegate(ring.NewInstanceRegisterDelegate(ring.JOINING, RingNumTokens)) delegate = ring.NewLeaveOnStoppingDelegate(delegate, logger) delegate = ring.NewTokensPersistencyDelegate(gatewayCfg.ShardingRing.TokensFilePath, ring.JOINING, delegate, logger) delegate = ring.NewAutoForgetDelegate(ringAutoForgetUnhealthyPeriods*gatewayCfg.ShardingRing.HeartbeatTimeout, delegate, logger) @@ -357,29 +357,6 @@ func requestActivity(ctx context.Context, name string, req interface{}) string { return fmt.Sprintf("%s: user=%q trace=%q request=%v", name, user, traceID, req) } -func (g *StoreGateway) OnRingInstanceRegister(_ *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { - // When we initialize the store-gateway instance in the ring we want to start from - // a clean situation, so whatever is the state we set it JOINING, while we keep existing - // tokens (if any) or the ones loaded from file. - var tokens []uint32 - if instanceExists { - tokens = instanceDesc.GetTokens() - } - - takenTokens := ringDesc.GetTokens() - newTokens := ring.GenerateTokens(RingNumTokens-len(tokens), takenTokens) - - // Tokens sorting will be enforced by the parent caller. - tokens = append(tokens, newTokens...) 
- - return ring.JOINING, tokens -} - -func (g *StoreGateway) OnRingInstanceTokens(_ *ring.BasicLifecycler, _ ring.Tokens) {} -func (g *StoreGateway) OnRingInstanceStopping(_ *ring.BasicLifecycler) {} -func (g *StoreGateway) OnRingInstanceHeartbeat(_ *ring.BasicLifecycler, _ *ring.Desc, _ *ring.InstanceDesc) { -} - func createBucketClient(cfg mimir_tsdb.BlocksStorageConfig, logger log.Logger, reg prometheus.Registerer) (objstore.Bucket, error) { bucketClient, err := bucket.NewClient(context.Background(), cfg.Bucket, "store-gateway", logger, reg) if err != nil { diff --git a/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go b/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go index 26e3cfa41d..177962f697 100644 --- a/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go +++ b/vendor/github.com/grafana/dskit/ring/basic_lifecycler_delegates.go @@ -150,3 +150,38 @@ func (d *AutoForgetDelegate) OnRingInstanceHeartbeat(lifecycler *BasicLifecycler d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc) } + +// InstanceRegisterDelegate generates a new set of tokenCount tokens on instance register, and returns the registerState InstanceState. +type InstanceRegisterDelegate struct { + registerState InstanceState + tokenCount int +} + +func NewInstanceRegisterDelegate(state InstanceState, tokenCount int) InstanceRegisterDelegate { + return InstanceRegisterDelegate{ + registerState: state, + tokenCount: tokenCount, + } +} + +func (d InstanceRegisterDelegate) OnRingInstanceRegister(_ *BasicLifecycler, ringDesc Desc, instanceExists bool, instanceID string, instanceDesc InstanceDesc) (InstanceState, Tokens) { + // Keep the existing tokens if any, otherwise start with a clean situation. + var tokens []uint32 + if instanceExists { + tokens = instanceDesc.GetTokens() + } + + takenTokens := ringDesc.GetTokens() + newTokens := GenerateTokens(d.tokenCount-len(tokens), takenTokens) + + // Tokens sorting will be enforced by the parent caller. + tokens = append(tokens, newTokens...) 
+ + return d.registerState, tokens +} + +func (d InstanceRegisterDelegate) OnRingInstanceTokens(*BasicLifecycler, Tokens) {} + +func (d InstanceRegisterDelegate) OnRingInstanceStopping(*BasicLifecycler) {} + +func (d InstanceRegisterDelegate) OnRingInstanceHeartbeat(*BasicLifecycler, *Desc, *InstanceDesc) {} diff --git a/vendor/modules.txt b/vendor/modules.txt index a0393cdf54..be715c7b4b 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -432,7 +432,7 @@ github.com/gosimple/slug # github.com/grafana-tools/sdk v0.0.0-20211220201350-966b3088eec9 => github.com/colega/grafana-tools-sdk v0.0.0-20220323154849-711bca56d13f ## explicit; go 1.13 github.com/grafana-tools/sdk -# github.com/grafana/dskit v0.0.0-20220621150600-346c4b6b7755 +# github.com/grafana/dskit v0.0.0-20220622130855-7bb188ccf75a ## explicit; go 1.17 github.com/grafana/dskit/backoff github.com/grafana/dskit/concurrency From 354882db21ae845d0bc3b5c78777221e9b3f59ab Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar <15064823+codesome@users.noreply.github.com> Date: Thu, 23 Jun 2022 14:23:11 +0530 Subject: [PATCH 30/63] Vendor latest mimir-prometheus (#2176) * Vendor latest mimir-prometheus Signed-off-by: Ganesh Vernekar * Update Thanos with a fork Signed-off-by: Ganesh Vernekar * Fix CI/build Signed-off-by: Ganesh Vernekar * Use thanos from grafana/thanos Signed-off-by: Ganesh Vernekar --- go.mod | 6 +- go.sum | 365 +-------------- pkg/compactor/bucket_compactor_e2e_test.go | 2 +- pkg/ingester/ingester.go | 2 +- pkg/querier/querier_test.go | 2 +- pkg/storegateway/bucket_test.go | 14 +- pkg/storegateway/postings_codec_test.go | 2 +- pkg/storegateway/testhelper/testhelper.go | 2 +- tools/tsdb-index-health/main.go | 2 +- tools/tsdb-print-chunk/main.go | 2 +- .../prometheus/prometheus/config/config.go | 27 ++ .../prometheus/storage/interface.go | 7 +- .../prometheus/prometheus/tsdb/block.go | 2 +- .../prometheus/prometheus/tsdb/blockwriter.go | 2 +- .../prometheus/tsdb/chunkenc/chunk.go | 21 +- .../prometheus/tsdb/chunkenc/ooo.go | 80 ++++ .../prometheus/tsdb/chunkenc/xor.go | 9 + .../prometheus/tsdb/chunks/chunks.go | 13 +- .../prometheus/tsdb/chunks/head_chunks.go | 23 +- .../prometheus/tsdb/chunks/old_head_chunks.go | 11 +- .../prometheus/prometheus/tsdb/compact.go | 129 ++++++ .../prometheus/prometheus/tsdb/db.go | 261 +++++++++-- .../prometheus/prometheus/tsdb/head.go | 436 ++++++++++++++---- .../prometheus/prometheus/tsdb/head_append.go | 301 ++++++++++-- .../prometheus/prometheus/tsdb/head_read.go | 267 ++++++++++- .../prometheus/prometheus/tsdb/head_wal.go | 290 +++++++++++- .../prometheus/prometheus/tsdb/ooo_head.go | 74 +++ .../prometheus/tsdb/ooo_head_read.go | 409 ++++++++++++++++ .../prometheus/prometheus/tsdb/querier.go | 4 +- .../prometheus/tsdb/record/record.go | 50 +- .../prometheus/prometheus/tsdb/wal/wal.go | 47 +- .../thanos-io/thanos/pkg/block/index.go | 2 +- .../pkg/compact/downsample/downsample.go | 2 +- vendor/modules.txt | 7 +- 34 files changed, 2295 insertions(+), 578 deletions(-) create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/chunkenc/ooo.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go create mode 100644 vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go diff --git a/go.mod b/go.mod index 88af99e74b..9d5cfd6292 100644 --- a/go.mod +++ b/go.mod @@ -227,7 +227,11 @@ replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110 replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache 
v0.0.0-20180902122335-24332e2d58ab // Using a fork of Prometheus while we work on querysharding to avoid a dependency on the upstream. -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220614075514-f2aba4af80e4 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 + +// Out of order Support forces us to fork thanos because we've changed the ChunkReader interface. +// Once the out of order support is upstreamed and Thanos has vendored it, we can remove this override. +replace github.com/thanos-io/thanos => github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 // Pin hashicorp depencencies since the Prometheus fork, go mod tries to update them. replace github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0 diff --git a/go.sum b/go.sum index 8fe627d048..c4cb37a7c6 100644 --- a/go.sum +++ b/go.sum @@ -5,9 +5,7 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= -cloud.google.com/go v0.49.0/go.mod h1:hGvAdzcWNbyuxS3nWhD7H2cIJxjRRTRLQVB0bdputVY= cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= -cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= @@ -38,8 +36,6 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/bigtable v1.1.0/go.mod h1:B6ByKcIdYmhoyDzmOnQxyOhN6r05qnewYIxxG6L0/b4= -cloud.google.com/go/bigtable v1.2.0/go.mod h1:JcVAOl45lrTmQfLj7T6TxyMzIN/3FGGcFm+2xVAli2o= cloud.google.com/go/bigtable v1.3.0/go.mod h1:z5EyKrPE8OQmeg4h5MNdKvuSnI9CCT49Ki3f23aBzio= cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow= cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM= @@ -49,7 +45,6 @@ cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wq cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc= cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= @@ -57,7 +52,6 @@ cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+ cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= 
cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= -cloud.google.com/go/storage v1.3.0/go.mod h1:9IAwXhoyBJ7z9LcAwkj0/7NnPzYaPeZxxVp3zm+5IqA= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= @@ -65,35 +59,26 @@ cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09 cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-pipeline-go v0.2.3 h1:7U9HBg1JFK3jHl5qmo4CTZKFTVgMwdFHMVtCdfBE21U= github.com/Azure/azure-pipeline-go v0.2.3/go.mod h1:x841ezTBIMG6O3lAcl8ATHnsOPVl2bqk7S3ta6S6u4k= -github.com/Azure/azure-sdk-for-go v36.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible h1:HzKLt3kIwMm4KeJYTdx9EbjRYTySD/t8i1Ee/W5EGXw= github.com/Azure/azure-sdk-for-go v65.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= github.com/Azure/azure-storage-blob-go v0.13.0 h1:lgWHvFh+UYBNVQLFHXkvul2f6yOPA9PIH82RTG2cSwc= github.com/Azure/azure-storage-blob-go v0.13.0/go.mod h1:pA9kNqtjUeQF2zOSu4s//nUdBD+e64lEuc4sVnuOfNs= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= github.com/Azure/go-autorest v14.2.0+incompatible h1:V5VMDjClD3GiElqLWO7mz2MxNAK/vTfRHdAubSIPRgs= github.com/Azure/go-autorest v14.2.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= -github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= -github.com/Azure/go-autorest/autorest v0.9.3-0.20191028180845-3492b2aff503/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.11.17/go.mod h1:eipySxLmqSyC5s5k1CLupqet0PSENBEDP93LQ9a8QYw= github.com/Azure/go-autorest/autorest v0.11.18/go.mod h1:dSiJPy22c3u0OtOKDNttNgqpNFY/GeWa7GH/Pz56QRA= github.com/Azure/go-autorest/autorest v0.11.23/go.mod h1:BAWYUWGPEtKPzjVkp0Q6an0MJcJDsoh5Z1BFAEFs4Xs= github.com/Azure/go-autorest/autorest v0.11.27 h1:F3R3q42aWytozkV8ihzcgMO4OA4cuqr3bNlsEuF6//A= github.com/Azure/go-autorest/autorest v0.11.27/go.mod h1:7l8ybrIdUmGqZMTD0sRtAr8NvbHjfofbf8RSP2q7w7U= -github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= -github.com/Azure/go-autorest/autorest/adal v0.8.1-0.20191028180845-3492b2aff503/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.9.2/go.mod h1:/3SMAM86bP6wC9Ev35peQDUeqFZBMH07vvUOmg4z/fE= github.com/Azure/go-autorest/autorest/adal v0.9.5/go.mod 
h1:B7KF7jKIeC9Mct5spmyCB/A8CG/sEz1vwIRGv/bbw7A= github.com/Azure/go-autorest/autorest/adal v0.9.11/go.mod h1:nBKAnTomx8gDtl+3ZCJv2v0KACFHWTB2drffI1B68Pk= github.com/Azure/go-autorest/autorest/adal v0.9.13/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= github.com/Azure/go-autorest/autorest/adal v0.9.14/go.mod h1:W/MM4U6nLxnIskrw4UwWzlHfGjwUS50aOsc/I3yuU8M= -github.com/Azure/go-autorest/autorest/adal v0.9.17/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.18/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= github.com/Azure/go-autorest/autorest/adal v0.9.20 h1:gJ3E98kMpFB1MFqQCvA1yFab8vthOeD4VlFRQULxahg= github.com/Azure/go-autorest/autorest/adal v0.9.20/go.mod h1:XVVeme+LZwABT8K5Lc3hA4nAe8LDBVle26gTrguhhPQ= @@ -101,27 +86,18 @@ github.com/Azure/go-autorest/autorest/azure/auth v0.5.8 h1:TzPg6B6fTZ0G1zBf3T54a github.com/Azure/go-autorest/autorest/azure/auth v0.5.8/go.mod h1:kxyKZTSfKh8OVFWPAgOgQ/frrJgeYQJPyR5fLFmXko4= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2 h1:dMOmEJfkLKW/7JsokJqkyoYSgmR08hi9KrhjZb+JALY= github.com/Azure/go-autorest/autorest/azure/cli v0.4.2/go.mod h1:7qkJkT+j6b+hIpzMOwPChJhTqS8VbsqqgULzMNRugoM= -github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= -github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/date v0.3.0 h1:7gUk1U5M/CQbp9WoqinNzJar+8KY+LPI6wiWrP/myHw= github.com/Azure/go-autorest/autorest/date v0.3.0/go.mod h1:BI0uouVdmngYNUzGWeSYnokU+TrmwEsOqdt8Y6sso74= -github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= -github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935gNZuuIPPVsHlr9DSOxSayd+k= github.com/Azure/go-autorest/autorest/mocks v0.4.2 h1:PGN4EDXnuQbojHbU0UWoNvmu9AGVwYHG9/fkDYhtAfw= github.com/Azure/go-autorest/autorest/mocks v0.4.2/go.mod h1:Vy7OitM9Kei0i1Oj+LvyAWMXJHeKH1MVlzFugfVrmyU= -github.com/Azure/go-autorest/autorest/to v0.3.1-0.20191028180845-3492b2aff503/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= github.com/Azure/go-autorest/autorest/to v0.4.0 h1:oXVqrxakqqV1UZdSazDOPOLvOIz+XA683u8EctwboHk= github.com/Azure/go-autorest/autorest/to v0.4.0/go.mod h1:fE8iZBn7LQR7zH/9XU2NcPR4o9jEImooCeWJcYV/zLE= -github.com/Azure/go-autorest/autorest/validation v0.2.1-0.20191028180845-3492b2aff503/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= github.com/Azure/go-autorest/autorest/validation v0.3.1 h1:AgyqjAd94fwNAoTjl/WQXg4VvFeRFpO+UhNyRXqF1ac= github.com/Azure/go-autorest/autorest/validation v0.3.1/go.mod h1:yhLgjC0Wda5DYXl6JAsWyUe4KVNffhoDhG0zVzUMo3E= -github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= github.com/Azure/go-autorest/logger v0.2.1 h1:IG7i4p/mDa2Ce4TRyAO8IHnVhAVF3RFU+ZtXWSmf4Tg= github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8= -github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/Azure/go-autorest/tracing v0.6.0 h1:TYi4+3m5t6K48TGI9AUdb+IzbnSxvnvUMfuitfgcfuo= 
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= @@ -143,11 +119,9 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/OneOfOne/xxhash v1.2.6 h1:U68crOE3y3MPttCMQGywZOLrTeF5HHJ3/vDBCJn9/bA= github.com/OneOfOne/xxhash v1.2.6/go.mod h1:eZbhyaAYD41SGSSsnmcpxVoRiQ/MPUTjUdIIOT9Um7Q= -github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/QcloudApi/qcloud_sign_golang v0.0.0-20141224014652-e4130a326409/go.mod h1:1pk82RBxDY/JZnPQrtqHlUFfCctgdorsd9M06fMynOM= @@ -171,14 +145,11 @@ github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAu github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a h1:HbKu58rmZpUGpz5+4FfNmIU+FmZg2P3Xaj2v2bfNWmk= github.com/alicebob/gopher-json v0.0.0-20200520072559-a9ecdc9d1d3a/go.mod h1:SGnFV6hVsYE877CKEZ6tDNTjaSXYUk6QqoIK6PrAtcc= -github.com/alicebob/miniredis v2.5.0+incompatible h1:yBHoLpsyjupjz3NL3MhKMVkR41j82Yjf3KFv7ApYzUI= -github.com/alicebob/miniredis v2.5.0+incompatible/go.mod h1:8HZjEj4yU0dwhYHky+DxYx+6BMjkBbe5ONFIF1MXffk= github.com/alicebob/miniredis/v2 v2.14.3 h1:QWoo2wchYmLgOB6ctlTt2dewQ1Vu6phl+iQbwT8SYGo= github.com/alicebob/miniredis/v2 v2.14.3/go.mod h1:gquAfGbzn92jvtrSC69+6zZnwSODVXVpYDRaGhWaL6I= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible h1:EaK5256H3ELiyaq5O/Zwd6fnghD6DqmZDQmmzzJklUU= github.com/aliyun/aliyun-oss-go-sdk v2.0.4+incompatible/go.mod h1:T/Aws4fEfogEE9v+HPhhw+CntffsBHJ8nXQCwKr0/g8= github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo29Kk6CurOXKm700vrz8f0KW0JNfpkRJY/8= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= @@ -186,7 +157,6 @@ github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hC github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-metrics v0.3.0/go.mod h1:zXjbSimjXTd7vOpY8B0/2LpvNvDoXBuplAD+gJD3GYs= github.com/armon/go-metrics v0.3.3/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= -github.com/armon/go-metrics v0.3.6/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-metrics v0.3.9 h1:O2sNqxBdvq8Eq5xmzljcYzAORli6RWCvEym4cJf9m18= 
github.com/armon/go-metrics v0.3.9/go.mod h1:4O98XIr/9W0sxpJ8UaYkvjk10Iff7SnFrb4QAOwNTFc= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= @@ -201,17 +171,9 @@ github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:W github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d h1:Byv0BzEl3/e6D5CLfI0j/7hiIEtvGVFPCZ7Ei2oq8iQ= github.com/asaskevich/govalidator v0.0.0-20210307081110-f21760c49a8d/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.15.78/go.mod h1:E3/ieXAlvM0XWO57iftYVDLLvQ824smPP3ATZkfNZeM= github.com/aws/aws-sdk-go v1.17.7/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.48/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.33.5/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= -github.com/aws/aws-sdk-go v1.33.12/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0= github.com/aws/aws-sdk-go v1.34.28/go.mod h1:H7NKnBqNVzoTJpGfLrQkkD+ytBA93eiDYi/+8rV9s48= -github.com/aws/aws-sdk-go v1.35.5/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k= -github.com/aws/aws-sdk-go v1.35.31/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.37.8/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.38.3/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.38.35/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= github.com/aws/aws-sdk-go v1.40.11/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= github.com/aws/aws-sdk-go v1.40.45/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q= @@ -248,7 +210,6 @@ github.com/aws/smithy-go v1.10.0 h1:gsoZQMNHnX+PaghNw4ynPsyGP7aUCqx5sY2dlPQsZ0w= github.com/aws/smithy-go v1.10.0/go.mod h1:SObp3lf9smib00L/v3U2eAKG8FyQ7iLrJnQiAmR5n+E= github.com/baidubce/bce-sdk-go v0.9.81 h1:n8KfThLG9fvGv3A+RtTt/jKhg/FPPRpo+iNnS2r+iPI= github.com/baidubce/bce-sdk-go v0.9.81/go.mod h1:zbYJMQwE4IZuyrJiFO8tO8NbtYiKTFTbwh4eIsqjVdg= -github.com/baiyubin/aliyun-sts-go-sdk v0.0.0-20180326062324-cfa1a18b161f/go.mod h1:AuiFmCCPBSrqvVMvuqFuk0qogytodnVFVSN5CeJB8Gc= github.com/beevik/ntp v0.2.0/go.mod h1:hIHWr+l3+/clUnF44zdK+CWW7fO8dR5cIylAQ76NRpg= github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8= github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= @@ -258,29 +219,20 @@ github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= -github.com/blang/semver v3.5.0+incompatible h1:CGxCgetQ64DKk7rdZ++Vfnb1+ogGNnB17OJKJXD2Cfs= -github.com/blang/semver v3.5.0+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= 
github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= github.com/casbin/casbin/v2 v2.37.0/go.mod h1:vByNa/Fchek0KZUgG5wEsl7iFsiviAYKRtgrQfcJqHg= -github.com/cenkalti/backoff v0.0.0-20181003080854-62661b46c409/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff v1.0.0/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff v2.2.1+incompatible h1:tNowT99t7UNflLxfYYSlKYsBpXdEet03Pg2g16Swow4= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= -github.com/cenkalti/backoff/v4 v4.0.2/go.mod h1:eEew/i+1Q6OrCDZh3WiXYv3+nJwBASZ8Bog/87DQnVg= -github.com/cenkalti/backoff/v4 v4.1.0/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.2/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.0/go.mod h1:dgIUBU3pDso/gPgZ1osOZ0iQf77oPR28Tjxl5dIMyVM= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE= github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -300,7 +252,6 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/readline v1.5.0/go.mod h1:x22KAscuvRqlLoK9CsoYsmxoXZMMFVyOl86cAH8qUic= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/chzyer/test v0.0.0-20210722231415-061457976a23/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= -github.com/cilium/ebpf v0.4.0/go.mod h1:4tRaxcgiL706VnOzHOdBlY8IEAIdxINsQBcU4xJJXRs= github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible/go.mod h1:nmEj6Dob7S7YxXgwXpfOuvO54S+tGdZdw9fuRZt25Ag= github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/mxj v1.8.4/go.mod h1:BVjHeAH+rl9rs6f+QIpeRl0tfu10SXn1pUSa5PVGJng= @@ -313,48 +264,27 @@ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/ github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c/go.mod h1:XGLbWH/ujMcbPbhZq52Nv6UrCghb1yGn//133kEsvDk= -github.com/cockroachdb/datadriven v0.0.0-20190531201743-edce55837238/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven 
v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/colega/grafana-tools-sdk v0.0.0-20220323154849-711bca56d13f h1:Mc/WpMhT0pzDD5zGjhge7PiO7nkrMME4GuGS1y4HGwk= github.com/colega/grafana-tools-sdk v0.0.0-20220323154849-711bca56d13f/go.mod h1:AHHlOEv1+GGQ3ktHMlhuTUwo3zljV3QJbC0+8o2kn+4= -github.com/containerd/cgroups v1.0.1/go.mod h1:0SJrPIenamHDcZhEcJMNBB85rHcUsw4f25ZfBiPYRkU= github.com/containerd/containerd v1.2.7/go.mod h1:bC6axHOhabU15QhwfG7w5PipXdVtMXFTttgp+kVtyUA= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/etcd v3.3.25+incompatible h1:0GQEw6h3YnuOVdtwygkIfJ+Omx0tZ8/QkVyXI4LkbeY= github.com/coreos/etcd v3.3.25+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20181012123002-c6f51f82210d/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf h1:iW4rZ826su+pqaw19uhpSCzhj44qo35pNgKFGqzDKkU= github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2 h1:D9/bQk5vlXQFZ6Kwuu6zaiXJ9oTPe68++AzAJc1DzSI= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f h1:lBNOc5arjvs8E5mO2tbpBpLoyyu8B6e44T7hJy6potg= github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/cortexproject/cortex v0.6.1-0.20200228110116-92ab6cbe0995/go.mod h1:3Xa3DjJxtpXqxcMGdk850lcIRb81M0fyY1MQ6udY134= -github.com/cortexproject/cortex v1.2.1-0.20200805064754-d8edc95e2c91/go.mod h1:PVPxNLrxKH+yc8asaJOxuz7TiRmMizFfnSMOnRzM6oM= -github.com/cortexproject/cortex v1.3.1-0.20200923145333-8587ea61fe17/go.mod h1:dJ9gpW7dzQ7z09cKtNN9PfebumgyO4dtNdFQ6eQEed0= -github.com/cortexproject/cortex v1.4.1-0.20201030080541-83ad6df2abea/go.mod h1:kXo5F3jlF7Ky3+I31jt/bXTzOlQjl2X/vGDpy0RY1gU= -github.com/cortexproject/cortex v1.5.1-0.20201111110551-ba512881b076/go.mod h1:zFBGVsvRBfVp6ARXZ7pmiLaGlbjda5ZnA4Y6qSJyrQg= -github.com/cortexproject/cortex v1.6.1-0.20210108144208-6c2dab103f20/go.mod h1:fOsaeeFSyWrjd9nFJO8KVUpsikcxnYsjEzQyjURBoQk= -github.com/cortexproject/cortex v1.6.1-0.20210215155036-dfededd9f331/go.mod 
h1:8bRHNDawVx8te5lIqJ+/AcNTyfosYNC34Qah7+jX/8c= -github.com/cortexproject/cortex v1.7.1-0.20210224085859-66d6fb5b0d42/go.mod h1:u2dxcHInYbe45wxhLoWVdlFJyDhXewsMcxtnbq/QbH4= -github.com/cortexproject/cortex v1.7.1-0.20210316085356-3fedc1108a49/go.mod h1:/DBOW8TzYBTE/U+O7Whs7i7E2eeeZl1iRVDtIqxn5kg= -github.com/cortexproject/cortex v1.8.1-0.20210422151339-cf1c444e0905/go.mod h1:xxm4/CLvTmDxwE7yXwtClR4dIvkG4S09o5DygPOgc1U= github.com/cortexproject/cortex v1.10.1-0.20211124141505-4e9fc3a2b5ab h1:THN4VQQqsZn5gNwcmQJO1GarnfZkSWfp5824ifoD9fQ= github.com/cortexproject/cortex v1.10.1-0.20211124141505-4e9fc3a2b5ab/go.mod h1:njSBkQ1wUNx9X4knV/j65Pi4ItlJXX4QwXRKoMflJd8= github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= @@ -370,7 +300,6 @@ github.com/cznic/ql v1.2.0/go.mod h1:FbpzhyZrqr0PVlK6ury+PoW3T0ODUV22OeWIxcaOrSE github.com/cznic/sortutil v0.0.0-20150617083342-4c7342852e65/go.mod h1:q2w6Bg5jeox1B+QkJ6Wp/+Vn0G/bo3f1uY7Fn3vivIQ= github.com/cznic/strutil v0.0.0-20171016134553-529a34b1c186/go.mod h1:AHHPPPXTw0h6pVabbcbyGRK1DckRn7r/STdZEeIDzZc= github.com/cznic/zappy v0.0.0-20160723133515-2533cb5b45cc/go.mod h1:Y1SNZ4dRUOKXshKUbwUapqNncRrho4mkjQebgEHZLj8= -github.com/davecgh/go-spew v0.0.0-20151105211317-5215b55f46b2/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -380,7 +309,6 @@ github.com/dennwc/varint v1.0.0/go.mod h1:hnItb35rvZvJrbTALZtY/iQfDs48JKRG1RPpgz github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78= github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dgryski/go-sip13 v0.0.0-20200911182023-62edffca9245/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= github.com/dhui/dktest v0.3.0/go.mod h1:cyzIUfGsBEbZ6BT7tnXqAShHSXCZhSNmFl70sZ7c1yc= github.com/digitalocean/godo v1.80.0 h1:ZULJ/fWDM97YtO7Fa+K6hzJLd7+smCu4N+0n+B/xtj4= @@ -403,7 +331,6 @@ github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5Xh github.com/docker/go-units v0.3.3/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= -github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= 
github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= @@ -415,7 +342,6 @@ github.com/edsrzf/mmap-go v0.0.0-20170320065105-0bce6a688712/go.mod h1:YO35OhQPt github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/edsrzf/mmap-go v1.1.0 h1:6EUwBLQ/Mcr1EYLE4Tn1VdW1A4ckqCQWZBw8Hr0kjpQ= github.com/edsrzf/mmap-go v1.1.0/go.mod h1:19H/e8pUPLicwkyNgOykDXkJ9F0MHE+Z52B8EIth78Q= -github.com/efficientgo/e2e v0.11.2-0.20211027134903-67d538984a47/go.mod h1:vDnF4AAEZmO0mvyFIATeDJPFaSRM7ywaOnKd61zaSoE= github.com/efficientgo/e2e v0.12.1 h1:ZYNTf09ptlba0I3ZStYaF7gCbevWdalriiX7usOSiFM= github.com/efficientgo/e2e v0.12.1/go.mod h1:xDHUyIqAWyVWU29Lf+BaZoavW7xAbDEvTwHWWI/3bhk= github.com/efficientgo/tools/core v0.0.0-20210129205121-421d0828c9a6/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= @@ -423,11 +349,9 @@ github.com/efficientgo/tools/core v0.0.0-20210829154005-c7bad8450208 h1:jIALuFym github.com/efficientgo/tools/core v0.0.0-20210829154005-c7bad8450208/go.mod h1:OmVcnJopJL8d3X3sSXTiypGoUSgFq1aDGmlrdi9dn/M= github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20 h1:kM/ALyvAnTrwSB+nlKqoKaDnZbInp1YImZvW+gtHwc8= github.com/efficientgo/tools/extkingpin v0.0.0-20210609125236-d73259166f20/go.mod h1:ZV0utlglOczUWv3ih2AbqPSoLoFzdplUYxwV62eZi6Q= -github.com/elastic/go-sysinfo v1.0.1/go.mod h1:O/D5m1VpYLwGjCYzEt63g3Z1uO3jXfwyzzjiW90t8cY= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/elazarl/goproxy v0.0.0-20170405201442-c4fc26588b6e/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/ema/qdisc v0.0.0-20190904071900-b82c76788043/go.mod h1:ix4kG2zvdUd8kEKSW0ZTr1XLks0epFpI4j745DXxlNE= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -439,8 +363,6 @@ github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go. 
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.6.7 h1:qcZcULcd/abmQg6dwigimCNEyi4gg31M/xaciQlDml8= github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= -github.com/evanphx/json-patch v4.5.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb h1:IT4JYU7k4ikYg1SCxNI1/Tieq/NFvh6dzLdgi7eu0tM= github.com/facette/natsort v0.0.0-20181210072756-2cd4dd1e2dcb/go.mod h1:bH6Xx7IW64qjjJq8M2u4dxNaBiDfKK+z/3eGDpXEQhc= @@ -464,7 +386,6 @@ github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHqu github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goblin v0.0.0-20210519012713-85d372ac71e2/go.mod h1:VzmDKDJVZI3aJmnRI9VjAn9nJ8qPPsN1fqzr9dqInIo= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= -github.com/frankban/quicktest v1.11.3/go.mod h1:wRf/ReqHper53s+kmmSZizM8NamnL3IM0I9ntUbOk+k= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= @@ -472,8 +393,6 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fsouza/fake-gcs-server v1.7.0/go.mod h1:5XIRs4YvwNbNoz+1JF8j6KLAyDh7RHGAyAK3EP2EsNk= github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= -github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= @@ -518,7 +437,6 @@ github.com/go-openapi/errors v0.17.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQH github.com/go-openapi/errors v0.18.0/go.mod h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.3/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= -github.com/go-openapi/errors v0.19.4/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= github.com/go-openapi/errors v0.19.6/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.7/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= @@ -526,14 +444,12 @@ github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpX github.com/go-openapi/errors v0.20.0/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= github.com/go-openapi/errors v0.20.2 h1:dxy7PGTqEh94zj2E3h1cUmQQWiM1+aeCROfAr02EmK8= 
github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.18.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.18.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= @@ -546,7 +462,6 @@ github.com/go-openapi/loads v0.18.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= github.com/go-openapi/loads v0.19.2/go.mod h1:QAskZPMX5V0C2gvfkGZzJlINuP7Hx/4+ix5jWFxsNPs= github.com/go-openapi/loads v0.19.3/go.mod h1:YVfqhUCdahYwR3f3iiwQLhicVRvLlU/WO5WPaZvcvSI= -github.com/go-openapi/loads v0.19.4/go.mod h1:zZVHonKd8DXyxyw4yfnVjPzBjIQcLt0CCsn0N0ZrQsk= github.com/go-openapi/loads v0.19.5/go.mod h1:dswLCAdonkRufe/gSUC3gN8nTSaB9uaS2es0x5/IbjY= github.com/go-openapi/loads v0.19.6/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= github.com/go-openapi/loads v0.19.7/go.mod h1:brCsvE6j8mnbmGBh103PT/QLHfbyDxA4hsKvYBNEGVc= @@ -556,16 +471,13 @@ github.com/go-openapi/loads v0.21.1 h1:Wb3nVZpdEzDTcly8S4HMkey6fjARRzb7iEaySimlD github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= github.com/go-openapi/runtime v0.0.0-20180920151709-4f900dc2ade9/go.mod h1:6v9a6LTXWQCdL8k1AO3cvqx5OtZY/Y9wKTgaoP6YRfA= github.com/go-openapi/runtime v0.19.0/go.mod h1:OwNfisksmmaZse4+gpV3Ne9AyMOlP1lt4sK4FXt0O64= -github.com/go-openapi/runtime v0.19.3/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.4/go.mod h1:X277bwSUBxVlCYR3r7xgZZGKVvBd/29gLDlFGtJ8NL4= github.com/go-openapi/runtime v0.19.15/go.mod h1:dhGWCTKRXlAfGnQG0ONViOZpjfg0m2gUt9nTQPQZuoo= github.com/go-openapi/runtime v0.19.16/go.mod h1:5P9104EJgYcizotuXhEuUrzVc+j1RiSjahULvYmlv98= github.com/go-openapi/runtime v0.19.24/go.mod h1:Lm9YGCeecBnUUkFTxPC4s1+lwrkJ0pthx8YvyjCfkgk= -github.com/go-openapi/runtime v0.19.26/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= github.com/go-openapi/runtime v0.19.29/go.mod h1:BvrQtn6iVb2QmiVXRsFAm6ZCAZBpbVKFfN6QWCp582M= github.com/go-openapi/runtime v0.23.1 h1:/Drg9R96eMmgKJHVWZADz78XbE39/6QiIiB45mc+epo= github.com/go-openapi/runtime v0.23.1/go.mod h1:AKurw9fNre+h3ELZfk6ILsfvPN+bvvlaU/M9q/r9hpk= -github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.18.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= github.com/go-openapi/spec v0.19.2/go.mod 
h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= @@ -592,11 +504,9 @@ github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+W github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= github.com/go-openapi/strfmt v0.21.2 h1:5NDNgadiX1Vhemth/TH4gCGopWSTdDjxl60H3B7f+os= github.com/go-openapi/strfmt v0.21.2/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.18.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.4/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= github.com/go-openapi/swag v0.19.7/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= github.com/go-openapi/swag v0.19.9/go.mod h1:ao+8BpOPyKdpQz3AOJfbeEVpLmWAvlT1IfTe5McPyhY= @@ -609,7 +519,6 @@ github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/e github.com/go-openapi/validate v0.18.0/go.mod h1:Uh4HdOzKt19xGIGm1qHf/ofbX1YQ4Y+MYsct2VUrAJ4= github.com/go-openapi/validate v0.19.2/go.mod h1:1tRCw7m3jtI8eNWEEliiAqUIcBztB2KDnRCRMUi7GTA= github.com/go-openapi/validate v0.19.3/go.mod h1:90Vh6jjkTn+OT1Eefm0ZixWNFjhtOH7vS9k0lo6zwJo= -github.com/go-openapi/validate v0.19.8/go.mod h1:8DJv2CVJQ6kGNpFW6eV9N3JviE1C85nY1c2z52x1Gk4= github.com/go-openapi/validate v0.19.10/go.mod h1:RKEZTUWDkxKQxN2jDT7ZnZi2bhZlbNMAuKvKB+IaGx8= github.com/go-openapi/validate v0.19.12/go.mod h1:Rzou8hA/CBw8donlS6WNEUQupNvUZ0waH08tGe6kAQ4= github.com/go-openapi/validate v0.19.15/go.mod h1:tbn/fdOwYHgrhPBzidZfJC2MIVvs9GA7monOmWBbeCI= @@ -623,8 +532,6 @@ github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTM github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= -github.com/go-redis/redis/v8 v8.0.0-beta.10.0.20200905143926-df7fe4e2ce72/go.mod h1:CJP1ZIHwhosNYwIdaHPZK9vHsM3+roNBaZ7U9Of1DXc= -github.com/go-redis/redis/v8 v8.2.3/go.mod h1:ysgGY09J/QeDYbu3HikWEIPCwaeOkuNoTgKayTEaEOw= github.com/go-redis/redis/v8 v8.11.4 h1:kHoYkfZP6+pe04aFTnhDH6GDROa5yJdHJVNxV3F46Tg= github.com/go-redis/redis/v8 v8.11.4/go.mod h1:2Z2wHZXdQpCDXEGzqMockDpNyYvi2l4Pxt6RJr792+w= github.com/go-resty/resty/v2 v2.1.1-0.20191201195748-d7b97669fe48 h1:JVrqSeQfdhYRFk24TvhTZWU0q8lfCojxZQFi3Ou7+uY= @@ -673,10 +580,8 @@ github.com/gobwas/ws v1.1.0-rc.5 h1:QOAag7FoBaBYYHRqzqkhhd8fq5RTubvI4v3Ft/gDVVQ= github.com/gobwas/ws v1.1.0-rc.5/go.mod h1:nzvNcVha5eUziGrbxFCo6qFIojQHjJV5cLYIbezhfL0= github.com/goccy/go-yaml v1.9.5/go.mod h1:U/jl18uSupI5rdI2jmuCswEA2htH9eXfferR3KfscvA= github.com/gocql/gocql v0.0.0-20190301043612-f6df8288f9b4/go.mod h1:4Fw1eo5iaEhDUs8XyuhSVCVy52Jq3L+/3GJgYkwc+/0= -github.com/gocql/gocql v0.0.0-20200121121104-95d072f1b5bb/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/gocql/gocql v0.0.0-20200526081602-cd04bd7f22a7/go.mod h1:DL0ekTmBSTdlNF25Orwt/JMzqIq3EJ4MVa/J/uK64OY= github.com/godbus/dbus v0.0.0-20190402143921-271e53dc4968/go.mod 
h1:/YcGZj5zSblfDWMMoOzV4fas9FZnQYTkDnsGvmh2Grw= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gofrs/uuid v4.2.0+incompatible h1:yyYWMnhkhrKwwr8gAOcOCYxOOscHgDS9yZgBrnJfGa0= @@ -685,12 +590,9 @@ github.com/gogo/googleapis v0.0.0-20180223154316-0cd9801be74a/go.mod h1:gf4bu3Q8 github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/googleapis v1.4.0 h1:zgVt4UpGxcqVOw97aRGxT4svlcmdK35fynLNctY32zI= github.com/gogo/googleapis v1.4.0/go.mod h1:5YRNX2z1oM5gXdAkurHa942MDgEJyk02w4OecKY87+c= -github.com/gogo/protobuf v1.0.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.2.2-0.20190723190241-65acae22fc9d/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.2.2-0.20190730201129-28a6bbf47e48/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= @@ -706,14 +608,11 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= @@ -722,7 +621,6 @@ github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= github.com/golang/mock v1.6.0/go.mod 
h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -744,14 +642,9 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu github.com/golang/snappy v0.0.0-20170215233205-553a64147049/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.2/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/golang/snappy v0.0.3-0.20201103224600-674baa8c7fc3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= -github.com/gomodule/redigo v1.8.4/go.mod h1:P9dn9mFrCBvWhGE1wpxx6fgq7BAeLBk+UUUzlpkBYO0= -github.com/gomodule/redigo v2.0.0+incompatible/go.mod h1:B4C85qUVwatsJoIUNIfCRsp7qO0iAmpGFZ4EELWSbC4= -github.com/google/btree v0.0.0-20180124185431-e89373fe6b4a/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.1 h1:gK4Kx5IaGY9CD5sPJ36FHiBJ6ZXl0kilRiiCj+jdYp4= @@ -780,7 +673,6 @@ github.com/google/go-github/v32 v32.1.0/go.mod h1:rIEpZD9CTDQwDK9GDrtMTycQNA4JU3 github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= -github.com/google/gofuzz v0.0.0-20161122191042-44d81051d367/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= @@ -828,12 +720,7 @@ github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/Oth github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM= github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk= github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1/go.mod h1:on+2t9HRStVgn95RSsFWFz+6Q0Snyqv1awfrALZdbtU= github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3/go.mod h1:h/KNeRx7oYU4SpA4SoY7W2/NxDKEEVuwA6j9A27L4OI= -github.com/gophercloud/gophercloud v0.6.0/go.mod h1:GICNByuaEBibcjmjvI7QvYJSZEbGkcYwAR7EZK2WMqM= -github.com/gophercloud/gophercloud v0.12.0/go.mod 
h1:gmC5oQqMDOMO1t1gq5DquX/yAU808e/4mzjjDA76+Ss= -github.com/gophercloud/gophercloud v0.13.0/go.mod h1:VX0Ibx85B60B5XOrZr6kaNwrmPUzcmMpwxvQ1WQIIWM= github.com/gophercloud/gophercloud v0.25.0 h1:C3Oae7y0fUVQGSsBrb3zliAjdX+riCSEh4lNMejFNI4= github.com/gophercloud/gophercloud v0.25.0/go.mod h1:Q8fZtyi5zZxPS/j9aj3sSxtvj41AdQMDwyo1myduD5c= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -846,7 +733,6 @@ github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gosimple/slug v1.1.1 h1:fRu/digW+NMwBIP+RmviTK97Ho/bEj/C9swrCspN3D4= github.com/gosimple/slug v1.1.1/go.mod h1:ER78kgg1Mv0NQGlXiDe57DpCyfbNywXXZ9mIorhxAf0= @@ -858,15 +744,15 @@ github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe h1:mxrRWDjKtob43xF9n github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe/go.mod h1:+26VJWpczg2OU3D0537acnHSHzhJORpxOs6F+M27tZo= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167 h1:PgEQkGHR4YimSCEGT5IoswN9gJKZDVskf+he6UClCLw= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20220614075514-f2aba4af80e4 h1:J1n/2uhH02wdD6RU4JEDAfFY0rWrApVKz+b4Gjecjl4= -github.com/grafana/mimir-prometheus v0.0.0-20220614075514-f2aba4af80e4/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= +github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 h1:TXqXoFZweHWWTEX26PZY0RfqivxObBz5nOPU2WcnLvc= +github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= +github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 h1:DG++oZD7E6YUm8YNZOu7RwZ8J/Slhcx3iOlKQBY6Oh0= +github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317/go.mod h1:9e/ytDfVepSKxihUWXyy1irj+ipM/DAlOBqsyXs+Y10= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.1.0/go.mod h1:f5nM7jw/oeRSadq3xCzHAvxcr8HZnzsqU6ILg/0NiiE= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw= github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-middleware/providers/kit/v2 v2.0.0-20201002093600-73cf2ae9d891/go.mod h1:516cTXxZzi4NBUBbKcwmO4Eqbb6GHAEd3o4N+GYyCBY= @@ -874,30 +760,18 
@@ github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-20200501113911-9a95f0fdbf github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7 h1:guQyUpELu4I0wKgdsRBZDA5blfGiUleuppRSVy9Qbi0= github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.0.0-rc.2.0.20201207153454-9f6bf00c00a7/go.mod h1:GhphxcdlaRyAuBSvo6rV71BvQcvB/vuX8ugCyybuS2k= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.4.1/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/grpc-ecosystem/grpc-gateway/v2 v2.10.2/go.mod h1:chrfS3YoLAlKTRE5cFWvCbt8uGAjshktT4PveTUpsFQ= github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645/go.mod h1:6iZfnjpejD4L/4DwD7NryNaJyCQdzwWwH2MWhCA90Kw= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= -github.com/hashicorp/consul/api v1.5.0/go.mod h1:LqwrLNW876eYSuUOo4ZLHBcdKc038txr/IMfbLPATa4= -github.com/hashicorp/consul/api v1.7.0/go.mod h1:1NSuaUUkFaJzMasbfq/11wKYWSR67Xn6r2DXKhuDNFg= -github.com/hashicorp/consul/api v1.8.1/go.mod h1:sDjTOq0yUyv5G4h+BqSea7Fn6BU+XbolEz1952UB+mk= github.com/hashicorp/consul/api v1.9.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.10.1/go.mod h1:XjsvQN+RJGWI2TWy1/kqaE16HrR2J/FWgkYjdZQsX9M= github.com/hashicorp/consul/api v1.13.0 h1:2hnLQ0GjQvw7f3O61jMO8gbasZviZTrt9R8WzgiirHc= github.com/hashicorp/consul/api v1.13.0/go.mod h1:ZlVrynguJKcYr54zGaDbaL3fOvKC9m72FhPvA8T35KQ= -github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= -github.com/hashicorp/consul/sdk v0.5.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.6.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= -github.com/hashicorp/consul/sdk v0.7.0/go.mod h1:fY08Y9z5SvJqevyZNy6WWPXiG3KwBPAvlcdx16zZ0fM= github.com/hashicorp/consul/sdk v0.8.0 h1:OJtKBtEjboEZvG6AOUdh4Z1Zbyu0WcxQ0qatRrZHTVU= github.com/hashicorp/consul/sdk v0.8.0/go.mod h1:GBvyrGALthsZObzUGsfgHZQDXjg4lOjagTIwIR1vPms= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= @@ -922,7 +796,6 @@ github.com/hashicorp/go-retryablehttp v0.5.3/go.mod h1:9B5zBasrRhHXnJnui7y6sL7es github.com/hashicorp/go-retryablehttp v0.7.1 h1:sUiuQAnLlbvmExtFQs72iFW/HXeUn8Z1aJLQ4LJJbTQ= github.com/hashicorp/go-retryablehttp v0.7.1/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= 
-github.com/hashicorp/go-rootcerts v1.0.1/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-rootcerts v1.0.2 h1:jzhAVGtqPKbwpyCPELlgNWhE1znq+qwJtW5Oi2viEzc= github.com/hashicorp/go-rootcerts v1.0.2/go.mod h1:pqUvnprVnM5bf7AOirdbb01K4ccR319Vf4pU3K5EGc8= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= @@ -936,18 +809,13 @@ github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/go.net v0.0.1/go.mod h1:hjKkEWcCURg++eb33jQU7oqQcI9XDCnUzHA0oac0k90= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.3/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0mNTz8vQ= github.com/hashicorp/mdns v1.0.1/go.mod h1:4gW7WsVCke5TE7EPeYliwHlRUyBtfCwuFwuMg2DmyNY= github.com/hashicorp/mdns v1.0.4/go.mod h1:mtBihi+LeNXGtG8L9dX59gAEa12BDtBQSp4v/YAJqrc= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= -github.com/hashicorp/serf v0.8.5/go.mod h1:UpNcs7fFbpKIyZaUuSW6EPiH+eZC7OuyFD+wc1oal+k= -github.com/hashicorp/serf v0.9.0/go.mod h1:YL0HO+FifKOW2u1ke99DGVu1zhcpZzNwrLIqBC7vbYU= -github.com/hashicorp/serf v0.9.3/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.5/go.mod h1:UWDWwZeL5cuWDJdl0C6wrvrUwEqtQ4ZKBKKENpqIUyk= github.com/hashicorp/serf v0.9.6 h1:uuEX1kLR6aoda1TBttmJQKDLZE1Ob7KN0NPdE7EtCDc= github.com/hashicorp/serf v0.9.6/go.mod h1:TXZNMjZQijwlDvp+r0b63xZ45H7JmCmgg4gpTwn9UV4= @@ -972,13 +840,10 @@ github.com/ionos-cloud/sdk-go/v6 v6.0.5851 h1:Xjdta3uR5SDLXXl0oahgVIJ+AQNFCyOCuA github.com/ionos-cloud/sdk-go/v6 v6.0.5851/go.mod h1:UE3V/2DjnqD5doOqtjYqzJRMpI1RiwrvuuSEPX1pdnk= github.com/jackc/fake v0.0.0-20150926172116-812a484cc733/go.mod h1:WrMFNQdiFJ80sQsxDoMokWK1W5TQtxBFNpzWTD84ibQ= github.com/jackc/pgx v3.2.0+incompatible/go.mod h1:0ZGrqGqkRlliWnWB4zKnWtjbSWbGkVEFm4TeybAXq+I= -github.com/jessevdk/go-flags v0.0.0-20180331124232-1c38ed7ad0cc/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= -github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -986,7 +851,6 @@ 
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfC github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= @@ -994,8 +858,6 @@ github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX github.com/jsimonetti/rtnetlink v0.0.0-20190606172950-9527aa82566a/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20190830100107-3784a6c7c552/go.mod h1:Oz+70psSo5OFh8DBl0Zv2ACw7Esh6pPUphlvZG9x7uw= github.com/jsimonetti/rtnetlink v0.0.0-20200117123717-f846d4f6c1f4/go.mod h1:WGuG/smIU4J/54PblvSbh+xvCZmpJnFgr3ds6Z55XMQ= -github.com/json-iterator/go v0.0.0-20180612202835-f2b4162afba3/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= -github.com/json-iterator/go v1.1.5/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= @@ -1020,7 +882,6 @@ github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQL github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.9.5/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= -github.com/klauspost/compress v1.13.1/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.4/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.5/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= github.com/klauspost/compress v1.13.6 h1:P76CopJELS0TiO2mebmnzgWaajssP/EszplttgQxcgc= @@ -1040,18 +901,14 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1 h1:Fmg33tUaq4/8ym9TJN1x7sLJnHVwhP33CNkpYV/7rwI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.0.0/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kshvakov/clickhouse v1.3.5/go.mod h1:DMzX7FxRymoNkVgizH0DWAL8Cur7wHLgx3MUnGwJqpE= -github.com/kylelemons/godebug v0.0.0-20160406211939-eadb3ce320cb/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod 
h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= -github.com/lann/builder v0.0.0-20150808151131-f22ce00fd939/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= github.com/leanovate/gopter v0.2.4 h1:U4YLBggDFhJdqQsG4Na2zX7joVTky9vHaj/AGEwSuXU= @@ -1061,15 +918,12 @@ github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgx github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= -github.com/lightstep/lightstep-tracer-go v0.18.0/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/linode/linodego v1.6.0 h1:y3KgXttj0v6V3HyGtsvdkTl0gIzaAAOdrDXCIwGeh2g= github.com/linode/linodego v1.6.0/go.mod h1:9lmhBsOupR6ke7D9Ioj1bq/ny9pfgFkCLiX7ubq0r08= github.com/lovoo/gcloud-opentracing v0.3.0/go.mod h1:ZFqk2y38kMDDikZPAK7ynTTGuyt17nSPdS3K5e+ZTBY= github.com/lufia/iostat v1.1.0/go.mod h1:rEPNA0xXgjHQjuI5Cy05sLlS2oRcSlWHRLrvh/AQ+Pg= github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190312143242-1de009706dbe/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1088,9 +942,6 @@ github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope github.com/mattn/go-colorable v0.1.9/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.12 h1:jF+Du6AlPIjs2BiUiQlKOX0rt3SujHxPnksPKZbaA40= github.com/mattn/go-colorable v0.1.12/go.mod h1:u5H1YNBxpqRaxsYJYSkiCWKzEfiAb1Gb520KVy5xxl4= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= -github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-ieproxy v0.0.1 h1:qiyop7gCflfhwCzGyeT0gro3sF9AIg9HU98JORTkqfI= github.com/mattn/go-ieproxy v0.0.1/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= @@ -1107,7 +958,6 @@ github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-sqlite3 v1.10.0/go.mod 
h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-xmlrpc v0.0.3/go.mod h1:mqc2dz7tP5x5BKlCahN/n+hs7OSZKJkS9JsHNBRlrxA= -github.com/matttproud/golang_protobuf_extensions v1.0.0/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= @@ -1119,12 +969,7 @@ github.com/mdlayher/netlink v1.1.0/go.mod h1:H4WCitaheIsdF9yOYu8CFmCgQthAPIWZmcK github.com/mdlayher/wifi v0.0.0-20190303161829-b1436901ddee/go.mod h1:Evt/EIne46u9PtQbeTx2NTcqURpr5K4SvKtGmBuDPN8= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= -github.com/miekg/dns v1.1.22/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= github.com/miekg/dns v1.1.26/go.mod h1:bPDLeHnStXmXAq1m/Ch/hvfNHr14JKNPMBo3VZKjuso= -github.com/miekg/dns v1.1.30/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.31/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.35/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= -github.com/miekg/dns v1.1.38/go.mod h1:KNUDUusw/aVsxyTYZM1oqvCicbwhgbNgztCETuNZ7xM= github.com/miekg/dns v1.1.41/go.mod h1:p6aan82bvRIyn+zDIv9xYNUpwa73JcSh9BKwknJysuI= github.com/miekg/dns v1.1.43/go.mod h1:+evo5L0630/F6ca/Z9+GAqzhjGyn8/c+TBaOyfEl0V4= github.com/miekg/dns v1.1.45/go.mod h1:e3IlAVfNqAllflbibAZEWOXOQ+Ynzk/dDozDxY7XnME= @@ -1134,9 +979,6 @@ github.com/minio/highwayhash v1.0.1/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLT github.com/minio/highwayhash v1.0.2/go.mod h1:BQskDq+xkJ12lmlUUi7U0M5Swg3EWR+dLTk+kldvVxY= github.com/minio/md5-simd v1.1.0 h1:QPfiOqlZH+Cj9teu0t9b1nTBfPbyTl16Of5MeuShdK4= github.com/minio/md5-simd v1.1.0/go.mod h1:XpBqgZULrMYD3R+M28PcmP0CkI7PEMzB3U77ZrKZ0Gw= -github.com/minio/minio-go/v6 v6.0.44/go.mod h1:qD0lajrGW49lKZLtXKtCB4X/qkMf0a5tBvN2PaZg7Gg= -github.com/minio/minio-go/v6 v6.0.56/go.mod h1:KQMM+/44DSlSGSQWSfRrAZ12FVMmpWNuX37i2AX0jfI= -github.com/minio/minio-go/v7 v7.0.2/go.mod h1:dJ80Mv2HeGkYLH1sqS/ksz07ON6csH3S6JUMSQ2zAns= github.com/minio/minio-go/v7 v7.0.10/go.mod h1:td4gW1ldOsj1PbSNS+WYK43j+P1XVhX/8W8awaYlBFo= github.com/minio/minio-go/v7 v7.0.16-0.20211116163909-d00629356463 h1:SXUyVAheCoi5HvKsrKWzMxMi5eCfZctZcEpJVgFKaMM= github.com/minio/minio-go/v7 v7.0.16-0.20211116163909-d00629356463/go.mod h1:pUV0Pc+hPd1nccgmzQF/EXh48l/Z/yps6QPF1aaie4g= @@ -1169,7 +1011,6 @@ github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6/go.mod h1:E2VnQOmVuvZB6U github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/reflect2 v0.0.0-20180320133207-05fbef0ca5da/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod 
h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= @@ -1178,7 +1019,6 @@ github.com/modocache/gover v0.0.0-20171022184752-b58185e213c5/go.mod h1:caMODM3P github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/morikuni/aec v0.0.0-20170113033406-39771216ff4c/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= -github.com/mozillazg/go-cos v0.13.0/go.mod h1:Zp6DvvXn0RUOXGJ2chmWt2bLEqRAnJnS3DnAZsJsoaE= github.com/mozillazg/go-httpheader v0.2.1 h1:geV7TrjbL8KXSyvghnFm+NyTux/hxwueTSrwhe88TQQ= github.com/mozillazg/go-httpheader v0.2.1/go.mod h1:jJ8xECTlalr6ValeXYdOF8fFUISeBAdw6E61aqQma60= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= @@ -1202,7 +1042,6 @@ github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nkeys v0.2.0/go.mod h1:XdZpAbhgyyODYqjTawOnIOI7VlbKSarI9Gfy1tqEu/s= github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= -github.com/ncw/swift v1.0.50/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/ncw/swift v1.0.52 h1:ACF3JufDGgeKp/9mrDgQlEgS8kRYC4XKcuzj/8EJjQU= github.com/ncw/swift v1.0.52/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= @@ -1221,21 +1060,15 @@ github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2f github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= -github.com/onsi/ginkgo v1.14.1/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= -github.com/onsi/gomega v1.4.2/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.10.2/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/onsi/gomega v1.15.0/go.mod h1:cIuvLEne0aoVhAgh/O6ac0Op8WWw9H6eYCriF+tEHG0= 
github.com/onsi/gomega v1.16.0 h1:6gjqkI8iiRHMvdccRJM8rVKjCWk6ZIm6FTm3ddIe4/c= @@ -1247,7 +1080,6 @@ github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3I github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM= github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= -github.com/opencontainers/runtime-spec v1.0.2/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opentracing-contrib/go-grpc v0.0.0-20180928155321-4b5a12d3ff02/go.mod h1:JNdpVEzCpXBgIiv4ds+TzhN1hrtxq6ClLrTlT9OQRSc= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e h1:4cPxUYdgaGzZIT5/j0IfqOrrXmq6bG8AwvwisMXpdrg= github.com/opentracing-contrib/go-grpc v0.0.0-20210225150812-73cb765af46e/go.mod h1:DYR5Eij8rJl8h7gblRrOZ8g0kW1umSpKqYIBTgeDtLo= @@ -1258,7 +1090,6 @@ github.com/opentracing-contrib/go-stdlib v1.0.0/go.mod h1:qtI1ogk+2JhVPIXVc6q+NH github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/opentracing/opentracing-go v1.1.1-0.20200124165624-2876d2018785/go.mod h1:C+iumr2ni468+1jvcHXLCdqP9uQnoQbdX93F3aWahWU= github.com/opentracing/opentracing-go v1.2.0 h1:uEJPy/1a5RIPAJ0Ov+OIO8OxWu77jEv+1B0VhjKrZUs= github.com/opentracing/opentracing-go v1.2.0/go.mod h1:GxEUsuufX4nBwe+T+Wl9TAgYrxe9dPLANfrWvHYVTgc= github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxSfWAKL3wpBW7V8scJMt8N8gnaMCS9E/cA= @@ -1271,7 +1102,6 @@ github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FI github.com/pascaldekloe/goe v0.1.0 h1:cBOtyMzM9HTpWjXfbbunk26uA6nG3a8n06Wieeh0MwY= github.com/pascaldekloe/goe v0.1.0/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/pelletier/go-toml v1.4.0/go.mod h1:PN7xzY2wHTK0K9p34ErDQMlFxa51Fk0OUruD3k1mMwo= github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= @@ -1285,43 +1115,26 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/posener/complete v1.2.3/go.mod h1:WZIdtGGp+qx0sLrYKtIRAruyNpv6hFCicSgv7Sy7s/s= github.com/prashantv/gostub v1.1.0/go.mod h1:A5zLQHz7ieHGG7is6LLXLz7I8+3LZzsrV0P1IAHhP5U= -github.com/prometheus/alertmanager v0.19.0/go.mod 
h1:Eyp94Yi/T+kdeb2qvq66E3RGuph5T/jm/RBVh4yz1xo= -github.com/prometheus/alertmanager v0.21.0/go.mod h1:h7tJ81NA0VLWvWEayi1QltevFkLF3KxmC/malTcT8Go= -github.com/prometheus/alertmanager v0.21.1-0.20200911160112-1fdff6b3f939/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= -github.com/prometheus/alertmanager v0.21.1-0.20201106142418-c39b78780054/go.mod h1:imXRHOP6QTsE0fFsIsAV/cXimS32m7gVZOiUj11m6Ig= -github.com/prometheus/alertmanager v0.21.1-0.20210310093010-0f9cab6991e6/go.mod h1:MTqVn+vIupE0dzdgo+sMcNCp37SCAi8vPrvKTTnTz9g= -github.com/prometheus/alertmanager v0.21.1-0.20210422101724-8176f78a70e1/go.mod h1:gsEqwD5BHHW9RNKvCuPOrrTMiP5I+faJUyLXvnivHik= github.com/prometheus/alertmanager v0.23.1-0.20210914172521-e35efbddb66a/go.mod h1:U7pGu+z7A9ZKhK8lq1MvIOp5GdVlZjwOYk+S0h3LSbA= github.com/prometheus/alertmanager v0.24.0 h1:HBWR3lk4uy3ys+naDZthDdV7yEsxpaNeZuUS+hJgrOw= github.com/prometheus/alertmanager v0.24.0/go.mod h1:r6fy/D7FRuZh5YbnX6J3MBY0eI4Pb5yPYS7/bPSXXqI= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= -github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= -github.com/prometheus/client_golang v1.2.1/go.mod h1:XMU6Z2MjaRKVu/dC1qupJI9SiNkDYzz3xecMgSW/F+U= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= github.com/prometheus/client_golang v1.4.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_golang v1.4.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.6.0/go.mod h1:ZLOG9ck3JLRdB5MgO8f+lLTe83AXG6ro35rLTxvnIl4= github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.8.0/go.mod h1:O9VU6huf47PktckDQfMTX0Y8tY0/7TSWwj+ITvv0TnM= -github.com/prometheus/client_golang v1.9.0/go.mod h1:FqZLKOZnGdFAhOK4nqGHa7D66IdsO+O441Eve7ptJDU= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= github.com/prometheus/client_golang v1.12.2 h1:51L9cDoUHVrXx4zWYlcLQIZ+d+VXHgqnYKkIuq4g/34= github.com/prometheus/client_golang v1.12.2/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_model v0.0.0-20170216185247-6f3806018612/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -1329,25 +1142,12 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M= github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/common v0.0.0-20180518154759-7600349dcfe1/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.8.0/go.mod h1:PC/OgXc+UN7B4ALwvn1yzVZmVwvhXp5JsbBv6wSv6i0= github.com/prometheus/common v0.9.1/go.mod h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.11.1/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.12.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.14.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.20.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.21.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.23.0/go.mod h1:H6QK/N6XVT42whUeIdI3dp36w49c+/iMDk7UAI2qm7Q= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= @@ -1357,33 +1157,23 @@ github.com/prometheus/common v0.34.0/go.mod h1:gB3sOl7P0TvJabZpLY5uQMpUqRCPPCyRL github.com/prometheus/common/assets v0.1.0/go.mod h1:D17UVUE12bHbim7HzwUvtqm6gwBEaDQ0F+hIGbFbccI= github.com/prometheus/common/sigv4 v0.1.0 h1:qoVebwtwwEhS85Czm2dSROY5fTo2PAPEVdDeppTwGX4= github.com/prometheus/common/sigv4 v0.1.0/go.mod h1:2Jkxxk9yYvCkE5G1sQT7GuEXm57JrvHu9k5YwTjsNtI= -github.com/prometheus/exporter-toolkit v0.5.0/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= -github.com/prometheus/exporter-toolkit v0.5.1/go.mod h1:OCkM4805mmisBhLmVFw858QYi3v0wKdY6/UxrT0pZVg= github.com/prometheus/exporter-toolkit v0.6.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= -github.com/prometheus/exporter-toolkit v0.7.0/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= github.com/prometheus/exporter-toolkit v0.7.1 h1:c6RXaK8xBVercEeUQ4tRNL8UGWzDHfvj9dseo1FcK1Y= github.com/prometheus/exporter-toolkit v0.7.1/go.mod h1:ZUBIj498ePooX9t/2xtDjeQYwvRpiPP2lh5u4iblj2g= 
github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289 h1:dTUS1vaLWq+Y6XKOTnrFpoVsQKLCbCp1OLj24TDi7oM= github.com/prometheus/node_exporter v1.0.0-rc.0.0.20200428091818-01054558c289/go.mod h1:FGbBv5OPKjch+jNUJmEQpMZytIdyW0NdBtWFcfSKusc= -github.com/prometheus/procfs v0.0.0-20180612222113-7d6f385de8be/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.5/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= -github.com/prometheus/procfs v0.0.6/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/rafaeljusto/redigomock v0.0.0-20190202135759-257e089e14a1/go.mod h1:JaY6n2sDr+z2WTsXkOmNRUfDy6FN0L6Nk7x06ndm4tY= github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be h1:ta7tUOvsPHVHGom5hKW5VXNc2xZIkfCKP8iaqOyYtUQ= github.com/rainycape/unidecode v0.0.0-20150907023854-cb7f23ec59be/go.mod h1:MIDFMn7db1kT65GmV94GzpX9Qdi7N/pQlwb+AN8wh+Q= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1392,8 +1182,6 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rs/cors v1.6.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= github.com/rs/cors v1.8.2 h1:KCooALfAYGs415Cwu5ABvv9n9509fSiG5SQJn/AQo4U= github.com/rs/cors v1.8.2/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= @@ -1404,7 +1192,6 @@ github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb github.com/ryanuber/columnize v2.1.0+incompatible/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= 
github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= -github.com/satori/go.uuid v0.0.0-20160603004225-b111a074d5ef/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9 h1:0roa6gXKgyta64uqh52AQG3wzZXH21unn+ltzQSXML0= github.com/scaleway/scaleway-sdk-go v1.0.0-beta.9/go.mod h1:fCa7OJZ/9DRTnOKmxvT6pn+LPWUptQAmHF/SBJUGEcg= @@ -1420,16 +1207,13 @@ github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9Nz github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749 h1:bUGsEnyNbVPw06Bs80sCeARAlK8lhwqGyi6UT8ymuGk= github.com/shurcooL/httpfs v0.0.0-20190707220628-8d4bc4ba7749/go.mod h1:ZY1cvUeJuFPAdZ/B6v7RHavJWZn2YPVFQ1OSXhCGOkg= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= -github.com/shurcooL/vfsgen v0.0.0-20181202132449-6a9ea43bcacd/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546 h1:pXY9qYc/MP5zdvqWEUH6SjNiu7VhSjuVFTFiTcphaLU= github.com/shurcooL/vfsgen v0.0.0-20200824052919-0d455de96546/go.mod h1:TrYk7fJVaAttu97ZZKrO9UbRa8izdowaMIZcxYMbVaw= github.com/siebenmann/go-kstat v0.0.0-20160321171754-d34789b79745/go.mod h1:G81aIFAMS9ECrwBYR9YxhlPjWgrItd+Kje78O6+uqm8= -github.com/sirupsen/logrus v1.0.5/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.5.0/go.mod h1:+F7Ogzej0PZc/94MaYx/nvG9jOFMD2osvC3s+Squfpo= github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1 h1:dJKuHgqk1NNQlqoA6BTlM1Wf9DOH3NBjQyu0h9+AZZE= @@ -1437,33 +1221,25 @@ github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/assertions v1.0.1 h1:voD4ITNjPL5jjBfgR/r8fPIIBrliWrWHeiJApdr3r4w= github.com/smartystreets/assertions v1.0.1/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1 h1:oMnRNZXX5j85zso6xCPRNPtmAycat+WcoKbklScLDgQ= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/soundcloud/go-runit v0.0.0-20150630195641-06ad41a06c4a/go.mod h1:LeFCbQYJ3KJlPs/FvPz2dy1tkpxyeNESVyCNNzRXFR0= 
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= -github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= github.com/spf13/afero v1.6.0 h1:xoax2sJ2DT8S8xA2paPFjDCScCNeWsg75VG0DLRreiY= github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= -github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= -github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1474,7 +1250,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= -github.com/stretchr/testify v0.0.0-20151208002404-e3a8ff8ce365/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1484,64 +1259,33 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.2 h1:4jaiDzPyXQvSd7D0EjG45355tLlV3VOECpq10pLC+8s= github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= -github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/common v1.0.194/go.mod h1:7sCQWVkxcsR38nffDW057DRGk8mUjK1Ing/EFOK8s8Y= github.com/tencentcloud/tencentcloud-sdk-go/tencentcloud/kms v1.0.194/go.mod h1:yrBKWhChnDqNz1xuXdSbWXG56XawEq0G5j1lg4VwBD4= github.com/tencentyun/cos-go-sdk-v5 v0.7.31 
h1:NujkkOKMJ3IFs1+trCwXOKRCIPQ8qI5Lxul9JkhTg6M= github.com/tencentyun/cos-go-sdk-v5 v0.7.31/go.mod h1:4E4+bQ2gBVJcgEC9Cufwylio4mXOct2iu05WjgEBx1o= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e h1:f1Zsv7OAU9iQhZwigp50Yl38W10g/vd5NC8Rdk1Jzng= github.com/thanos-community/galaxycache v0.0.0-20211122094458-3a32041a1f1e/go.mod h1:jXcofnrSln/cLI6/dhlBxPQZEEQHVPCcFaH75M+nSzM= -github.com/thanos-io/thanos v0.8.1-0.20200109203923-552ffa4c1a0d/go.mod h1:usT/TxtJQ7DzinTt+G9kinDQmRS5sxwu0unVKZ9vdcw= -github.com/thanos-io/thanos v0.13.1-0.20200731083140-69b87607decf/go.mod h1:G8caR6G7pSDreRDvFm9wFuyjEBztmr8Ag3kBYpa/fEc= -github.com/thanos-io/thanos v0.13.1-0.20200807203500-9b578afb4763/go.mod h1:KyW0a93tsh7v4hXAwo2CVAIRYuZT1Kkf4e04gisQjAg= -github.com/thanos-io/thanos v0.13.1-0.20201019130456-f41940581d9a/go.mod h1:A3qUEEbsVkplJnxyDLwuIuvTDaJPByTH+hMdTl9ujAA= -github.com/thanos-io/thanos v0.13.1-0.20201030101306-47f9a225cc52/go.mod h1:OqqX4x21cg5N5MMHd/yGQAc/V3wg8a7Do4Jk8HfaFZQ= -github.com/thanos-io/thanos v0.13.1-0.20210108102609-f85e4003ba51/go.mod h1:kPvI4H0AynFiHDN95ZB28/k70ZPGCx+pBrRh6RZPimw= -github.com/thanos-io/thanos v0.13.1-0.20210204123931-82545cdd16fe/go.mod h1:ZLDGYRNkgM+FCwYNOD+6tOV+DE2fpjzfV6iqXyOgFIw= -github.com/thanos-io/thanos v0.13.1-0.20210224074000-659446cab117/go.mod h1:kdqFpzdkveIKpNNECVJd75RPvgsAifQgJymwCdfev1w= -github.com/thanos-io/thanos v0.13.1-0.20210226164558-03dace0a1aa1/go.mod h1:gMCy4oCteKTT7VuXVvXLTPGzzjovX1VPE5p+HgL1hyU= -github.com/thanos-io/thanos v0.13.1-0.20210401085038-d7dff0c84d17/go.mod h1:zU8KqE+6A+HksK4wiep8e/3UvCZLm+Wrw9AqZGaAm9k= -github.com/thanos-io/thanos v0.22.0/go.mod h1:SZDWz3phcUcBr4MYFoPFRvl+Z9Nbi45HlwQlwSZSt+Q= -github.com/thanos-io/thanos v0.24.0/go.mod h1:sfnKJG7cDA41ixNL4gsTJEa3w9Qt8lwAjw+dqRMSDG0= -github.com/thanos-io/thanos v0.26.1-0.20220602051129-a6f6ce060ed4 h1:8tOJip28qYUWl9aO4CfgFRUI60xujkdu+DljSbNixSc= -github.com/thanos-io/thanos v0.26.1-0.20220602051129-a6f6ce060ed4/go.mod h1:9e/ytDfVepSKxihUWXyy1irj+ipM/DAlOBqsyXs+Y10= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab h1:7ZR3hmisBWw77ZpO1/o86g+JV3VKlk3d48jopJxzTjU= github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab/go.mod h1:eheTFp954zcWZXCU8d0AT76ftsQOTo4DTqkN/h3k1MY= github.com/tidwall/pretty v0.0.0-20180105212114-65a9db5fad51/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tidwall/pretty v1.0.0 h1:HsD+QiTn7sK6flMKIvNmpqz1qrpP3Ps6jOKIKMooyg4= github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tv42/httpunix v0.0.0-20150427012821-b75d8614f926/go.mod h1:9ESjWnEqriFuLhtthL60Sar/7RFoluCcXsuvEwTV5KM= -github.com/uber/jaeger-client-go v2.15.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.20.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.22.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go v2.24.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-client-go 
v2.25.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.28.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.29.1+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= github.com/uber/jaeger-client-go v2.30.0+incompatible h1:D6wyKGCecFaSRUpo8lCVbaOOb6ThwMmTEbhRwtKR97o= github.com/uber/jaeger-client-go v2.30.0+incompatible/go.mod h1:WVhlPFC8FDjOFMMWRy2pZqQJSXxYSwNYOkTr/Z6d3Kk= -github.com/uber/jaeger-lib v1.5.1-0.20181102163054-1fc5c315e03c/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.2.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= -github.com/uber/jaeger-lib v2.4.0+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/uber/jaeger-lib v2.4.1+incompatible h1:td4jdvLcExb4cBISKIpHuGoVXh+dVKhn2Um6rjCsSsg= github.com/uber/jaeger-lib v2.4.1+incompatible/go.mod h1:ComeNDZlWwrWnDv8aPp0Ba6+uUTzImX/AauajbLI56U= github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.2/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw= github.com/vultr/govultr/v2 v2.17.1 h1:UBmotwA0mkGtyJMakUF9jhLH/W3mN5wfGRn543i/BCA= github.com/vultr/govultr/v2 v2.17.1/go.mod h1:ZFOKGWmgjytfyjeyAdhQlSWwTjh2ig+X49cAp50dzXI= -github.com/weaveworks/common v0.0.0-20200206153930-760e36ae819a/go.mod h1:6enWAqfQBFrE8X/XdJwZr8IKgh1chStuFR0mjU/UOUw= -github.com/weaveworks/common v0.0.0-20200625145055-4b1847531bc9/go.mod h1:c98fKi5B9u8OsKGiWHLRKus6ToQ1Tubeow44ECO1uxY= -github.com/weaveworks/common v0.0.0-20200914083218-61ffdd448099/go.mod h1:hz10LOsAdzC3K/iXaKoFxOKTDRgxJl+BTGX1GY+TzO4= -github.com/weaveworks/common v0.0.0-20201119133501-0619918236ec/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= -github.com/weaveworks/common v0.0.0-20210112142934-23c8d7fa6120/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= -github.com/weaveworks/common v0.0.0-20210419092856-009d1eebd624/go.mod h1:ykzWac1LtVfOxdCK+jD754at1Ws9dKCwFeUzkFBffPs= github.com/weaveworks/common v0.0.0-20210913144402-035033b78a78/go.mod h1:YU9FvnS7kUnRt6HY10G+2qHkwzP3n3Vb1XsXDsJTSp8= github.com/weaveworks/common v0.0.0-20211109170639-0684aab3d884 h1:8xKd5YG67aKrYwyxXA55ox7AjCMqiMq4gWBVi+lkKLE= github.com/weaveworks/common v0.0.0-20211109170639-0684aab3d884/go.mod h1:GWX2dQ7yjrgvqH0+d3kCJC5bsY8oOFwqjxFMHaRK4/k= @@ -1555,8 +1299,6 @@ github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhe github.com/xdg/stringprep v0.0.0-20180714160509-73f8eece6fdc/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= -github.com/xlab/treeprint v0.0.0-20180616005107-d6fb6747feb6/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg= -github.com/xlab/treeprint v1.0.0/go.mod h1:IoImgRak9i3zJyuxOKUP1v4UZd1tMoKkq/Cimt1uhCg= github.com/xlab/treeprint v1.1.0/go.mod h1:gj5Gd3gPdKtR1ikdDK6fnFLdmIS0X30kTTuNd/WEJu0= github.com/youmark/pkcs8 
v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1568,42 +1310,25 @@ github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da h1:NimzV1aGyq29m5ukMK0AMWEhFaL/lrEOaephfuoiARg= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= gitlab.com/nyarla/go-crypt v0.0.0-20160106005555-d9a5dc2b789b/go.mod h1:T3BPAOm2cqquPa0MKWeNkmOM5RQsRhkrwMWonFMN7fE= -go.elastic.co/apm v1.5.0/go.mod h1:OdB9sPtM6Vt7oz3VXt7+KR96i9li74qrxBGHTQygFvk= go.elastic.co/apm v1.11.0/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= -go.elastic.co/apm/module/apmhttp v1.5.0/go.mod h1:1FbmNuyD3ddauwzgVwFB0fqY6KbZt3JkV187tGCYYhY= go.elastic.co/apm/module/apmhttp v1.11.0/go.mod h1:5JFMIxdeS4vJy+D1PPPjINuX6hZ3AHalZXoOgyqZAkk= -go.elastic.co/apm/module/apmot v1.5.0/go.mod h1:d2KYwhJParTpyw2WnTNy8geNlHKKFX+4oK3YLlsesWE= go.elastic.co/apm/module/apmot v1.11.0/go.mod h1:Qnbt3w1DvUd/5QugAF1AJ3mR4AG86EcJFBnAGW77EmU= -go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.5-0.20200615073812-232d8fc87f50/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= -go.etcd.io/etcd v0.0.0-20190709142735-eb7dd97135a5/go.mod h1:N0RPWo9FXJYZQI4BTkDtQylrstIigYHeR18ONnyTufk= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.5.0-alpha.5.0.20200520232829-54ba9589114f/go.mod h1:skWido08r9w6Lq/w70DO5XYIKMu4QFu1+4VsqLQuJy8= go.etcd.io/etcd v3.3.25+incompatible h1:V1RzkZJj9LqsJRy+TUBgpWSbZXITLB819lstuTFoZOY= go.etcd.io/etcd v3.3.25+incompatible/go.mod h1:yaeTdrJi5lOmYerz05bd8+V7KubZs8YSFZfzsF9A6aI= -go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0 h1:GsV3S+OfZEOCNXdtNkBSR7kgLobAa/SO6tCxRa0GAYw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0 h1:2aQv6F436YnN7I4VbI8PPYrBhu+SmrTaADcf8Mi/6PU= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= -go.etcd.io/etcd/client/v3 v3.5.0-alpha.0.0.20210225194612-fa82d11a958a/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= go.etcd.io/etcd/client/v3 v3.5.0 h1:62Eh0XOro+rDwkrypAGDfgmNh5Joq+z+W9HZdlXMzek= go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= -go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= -go.etcd.io/etcd/server/v3 
v3.5.0-alpha.0.0.20210225194612-fa82d11a958a/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.0/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.1.1/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= -go.mongodb.org/mongo-driver v1.1.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= go.mongodb.org/mongo-driver v1.3.0/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.3.4/go.mod h1:MSWZXKOynuguX+JSvwP8i+58jYCXxbia8HS3gZBapIE= go.mongodb.org/mongo-driver v1.4.3/go.mod h1:WcMNYLx/IlOxLe6JRJiv2uXuCz6zBLndR4SoGjYphSc= @@ -1629,7 +1354,6 @@ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0 h1:mac9BKR go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.32.0/go.mod h1:5eCOqeGphOyz6TsY3ZDNjE33SM/TFAK3RGuCL2naTgY= go.opentelemetry.io/contrib/propagators/ot v1.4.0 h1:sHp8P5+xmMORvsgKjIPPX4U97JUgSqY4xPWa6ncF1PA= go.opentelemetry.io/contrib/propagators/ot v1.4.0/go.mod h1:FivzsGJqC7ND++UUOifWfkiuEOFXtVQ3fh2ZkqIJ9X4= -go.opentelemetry.io/otel v0.11.0/go.mod h1:G8UCk+KooF2HLkgo8RHX9epABH/aRGYET7gQOqBVdB0= go.opentelemetry.io/otel v1.0.0-RC3/go.mod h1:Ka5j3ua8tZs4Rkq4Ex3hwgBgOchyPVq5S6P2lz//nKQ= go.opentelemetry.io/otel v1.0.0/go.mod h1:AjRVh9A5/5DE7S+mZtTR6t8vpKKryam+0lREnfmS4cg= go.opentelemetry.io/otel v1.0.1/go.mod h1:OPEOD4jIT2SlZPMmwT6FqZz2C0ZNdQqiWcoK6M0SNFU= @@ -1664,11 +1388,9 @@ go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.5.1/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= -go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= go.uber.org/atomic v1.9.0 h1:ECmE8Bn/WFTYwEW/bpKD3M8VtR/zQVbavAoalC1PYyE= go.uber.org/atomic v1.9.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/automaxprocs v1.2.0/go.mod h1:YfO3fm683kQpzETxlTGZhGIVmXAhaw3gxeBADbpZtnU= go.uber.org/automaxprocs v1.4.0/go.mod h1:/mTEdr7LvHhs0v7mjdxDreTz1OG5zdZGqgOnhWiR/+Q= go.uber.org/automaxprocs v1.5.1/go.mod h1:BF4eumQw0P9GtnuxxovUd06vwm1o18oMzFtK66vU6XU= go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= @@ -1678,20 +1400,15 @@ go.uber.org/goleak v1.1.12/go.mod h1:cwTWslyiVhfpKIDGSZEM2HlOvcqm+tG4zioyIeLoqMQ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.7.0 h1:zaiO/rmgFjbmCXdSYJWQcdvOCsthmdaHfr3Gm2Kx4Ec= go.uber.org/multierr v1.7.0/go.mod h1:7EAYxJLBy9rStEaz58O2t4Uvip6FSURkq8/ppBp95ak= go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= -go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= 
-go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.19.1 h1:ue41HOKd1vGURxrmeKIgELGb3jPW9DMUDGtsinblHwI= go.uber.org/zap v1.19.1/go.mod h1:j3DNczoxDZroyBnOT1L/Q79cfUMGZxlv/9dzN7SM1rI= -golang.org/x/crypto v0.0.0-20180608092829-8ac0e0d97ce4/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -1700,7 +1417,6 @@ golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20190513172903-22d7a77e9e5f/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -1709,15 +1425,11 @@ golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191112222119-e1110fd1c708/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20191202143827-86a70503ff7e/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200709230013-948cd5f35899/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200728195943-123391ffb6de/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201208171446-5f87f3452ae9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201216223049-8b5274cf687f/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210513164829-c07d793c2f9a/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= @@ -1735,15 +1447,12 @@ golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod 
h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= -golang.org/x/exp v0.0.0-20191029154019-8994fa331a53/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20200821190819-94841d0725da/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= golang.org/x/image v0.0.0-20180708004352-c73c2afc3b81/go.mod h1:ux5Hcp/YLpHSI86hEcLt0YII63i6oz57MZXIpbrjZUs= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= @@ -1774,7 +1483,6 @@ golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3 h1:kQgndtyPBW/JIYERgdxfwMYh3AVStj88WQTlNDi2a+o= golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1792,7 +1500,6 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190424112056-4829fb13d2c6/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1802,11 +1509,8 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net 
v0.0.0-20190921015927-1a5e07d1ff72/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191007182048-72f939374954/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191126235420-ef20fe5d7933/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1814,7 +1518,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1824,7 +1527,6 @@ golang.org/x/net v0.0.0-20200602114024-627f9648deb9/go.mod h1:qpuaurCH72eLCgpAm/ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= -golang.org/x/net v0.0.0-20201006153459-a7d1128ccaa0/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= @@ -1834,7 +1536,6 @@ golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= -golang.org/x/net v0.0.0-20210324051636-2c4c8ecb7826/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210410081132-afb366fc7cd1/go.mod h1:9tjilg8BloeKEkVJvy7fQ90B1CfIiPueXVOjqfkSzI8= golang.org/x/net 
v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= @@ -1869,11 +1570,9 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210210192628-66670185b0cd/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= @@ -1894,13 +1593,11 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20200930132711-30421366ff76/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f h1:Ax0t5p6N38Ga0dThY21weqDEyz2oklo4IvDkpigvkD8= golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1920,7 +1617,6 @@ golang.org/x/sys v0.0.0-20190411185658-b44545bcd369/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190425145619-16072639606e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190426135247-a129542de9ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= 
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1929,7 +1625,6 @@ golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190902133755-9109b7679e13/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1939,18 +1634,14 @@ golang.org/x/sys v0.0.0-20190924154521-2837fb4f24fe/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191010194322-b09406accb47/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191113165036-4c7a9d0fe056/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191128015809-6d18c012aee9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200107162124-548cf772de50/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1962,7 +1653,6 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys 
v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1970,18 +1660,14 @@ golang.org/x/sys v0.0.0-20200519105757-fe76b779f299/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200728102440-3e129f6d46b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200828194041-157a740278f4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201015000850-e3ed0017c211/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1989,8 +1675,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210314195730-07df6a141424/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1998,7 +1682,6 @@ golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys 
v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210525143221-35b2ab0089ea/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2033,7 +1716,6 @@ golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXR golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211 h1:JGgROgKl9N8DuW20oFS5gxc+lE67/N3FcwmBPMe7ArY= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2049,9 +1731,6 @@ golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210611083556-38a9dc6acbc6/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2061,7 +1740,6 @@ golang.org/x/time v0.0.0-20220224211638-0e9765cccd65/go.mod h1:tRJNPiyCQ0inRvYxb golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190125232054-d66bd3c5d5a6/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190206041539-40960b6deb8e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -2084,7 +1762,6 @@ golang.org/x/tools 
v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgw golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190624222133-a101b041ded4/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190813034749-528a2984e271/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190907020128-2ca718005c18/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -2092,19 +1769,14 @@ golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191111182352-50fa39b762bc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191203134012-c197fd4bf371/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191216052735-49a3e744a425/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= -golang.org/x/tools v0.0.0-20200108203644-89082a384178/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= @@ -2121,7 +1793,6 @@ golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roY golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200509030707-2212a7e161a5/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200513201620-d5fe73897c97/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= 
golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -2130,7 +1801,6 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= @@ -2177,15 +1847,11 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= -google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.39.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= -google.golang.org/api v0.42.0/go.mod h1:+Oj4s6ch2SEGtPjGqfUfZonBH0GjQH89gTeKKAEGZKI= google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= @@ -2217,7 +1883,6 @@ google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCID google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180518175338-11a468237815/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180608181217-32ee49c4dd80/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -2228,13 +1893,10 @@ google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dT 
google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191028173616-919d9bdd9fe6/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= -google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= @@ -2252,12 +1914,9 @@ google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1m google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200605102947-12044bf5ea91/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200710124503-20a17af7bd0e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200724131911-43cab4749ae7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200806141610-86f49bd18e98/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200815001618-f69a88009b70/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -2268,10 +1927,8 @@ google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto 
v0.0.0-20210312152112-fc591d9ea70f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= @@ -2333,7 +1990,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw= google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2347,13 +2003,10 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo= gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.42.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.57.0 h1:9unxIsFcTt4I55uWluz+UmL95q4kdJ0buvQ1ZIqVQww= gopkg.in/ini.v1 v1.57.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= @@ -2368,7 +2021,6 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.7/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= @@ -2390,28 +2042,21 @@ honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt honnef.co/go/tools v0.0.1-2020.1.3/go.mod 
h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.0.0-20191115095533-47f6de673b26/go.mod h1:iA/8arsvelvo4IDqIhX4IbjTEKBGgvsf2OraTuRtLFU= k8s.io/api v0.24.1 h1:BjCMRDcyEYz03joa3K1+rbshwh1Ay6oB53+iUx2H8UY= k8s.io/api v0.24.1/go.mod h1:JhoOvNiLXKTPQ60zh2g0ewpA+bnEYf5q44Flhquh4vQ= -k8s.io/apimachinery v0.0.0-20191115015347-3c7067801da2/go.mod h1:dXFS2zaQR8fyzuvRdJDHw2Aerij/yVGJSre0bZQSVJA= k8s.io/apimachinery v0.24.1 h1:ShD4aDxTQKN5zNf8K1RQ2u98ELLdIW7jEnlO9uAMX/I= k8s.io/apimachinery v0.24.1/go.mod h1:82Bi4sCzVBdpYjyI4jY6aHX+YCUchUIrZrXKedjd2UM= k8s.io/client-go v0.24.1 h1:w1hNdI9PFrzu3OlovVeTnf4oHDt+FJLd9Ndluvnb42E= k8s.io/client-go v0.24.1/go.mod h1:f1kIDqcEYmwXS/vTbbhopMUbhKp2JhOeVTfxgaCIlF8= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8= k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.60.1 h1:VW25q3bZx9uE3vvdL6M8ezOX79vA2Aq1nEWLqNQclHc= k8s.io/klog/v2 v2.60.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20191107075043-30be4d16710a/go.mod h1:1TqjTSzOxsLGIKfj0lK8EeCP7K1iUG65v09OM0/WG5E= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42 h1:Gii5eqf+GmIEwGNKQYQClCayuJCe2/4fZUvF7VG99sU= k8s.io/kube-openapi v0.0.0-20220328201542-3ee0da9b0b42/go.mod h1:Z/45zLw8lUo4wdiUkI+v/ImEGAvu3WatcZl3lPMR4Rk= -k8s.io/utils v0.0.0-20191114200735-6ca3b61696b6/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9 h1:HNSDgDCrr/6Ly3WEGKZftiE7IY19Vz2GdbOCyI4qqhc= k8s.io/utils v0.0.0-20220210201930-3a6ce19ff2f9/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= @@ -2421,8 +2066,6 @@ rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2 h1:kDi4JBNAsJWfz1aEXhO8Jg87JJaPNLh5tIzYHgStQ9Y= sigs.k8s.io/json v0.0.0-20211208200746-9f7c6b3444d2/go.mod h1:B+TnT182UBxE84DiCz4CVE26eOSDAeYCpfDnC2kdKMY= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e h1:4Z09Hglb792X0kfOBBJUPFEyvVfQWrYT/l8h5EKA6JQ= -sigs.k8s.io/structured-merge-diff v0.0.0-20190525122527-15d366b2352e/go.mod h1:wWxsB5ozmmv/SG7nM11ayaAW51xMvak/t1r0CSlcokI= sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y= sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4= diff --git a/pkg/compactor/bucket_compactor_e2e_test.go b/pkg/compactor/bucket_compactor_e2e_test.go index 23afa25e5c..81f9742e5a 100644 --- 
a/pkg/compactor/bucket_compactor_e2e_test.go +++ b/pkg/compactor/bucket_compactor_e2e_test.go @@ -674,7 +674,7 @@ func createBlockWithOptions( headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = filepath.Join(dir, "chunks") headOpts.ChunkRange = 10000000000 - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) if err != nil { return id, errors.Wrap(err, "create head block") } diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index e600aa4bd1..6057439dc9 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -516,7 +516,7 @@ func (i *Ingester) applyExemplarsSettings() { if tsdb == nil { continue } - if err := tsdb.Head().ApplyConfig(&cfg); err != nil { + if err := tsdb.db.ApplyConfig(&cfg); err != nil { level.Error(i.logger).Log("msg", "failed to apply config to TSDB", "user", userID, "err", err) } } diff --git a/pkg/querier/querier_test.go b/pkg/querier/querier_test.go index 7cf19b0241..b5c1763fe2 100644 --- a/pkg/querier/querier_test.go +++ b/pkg/querier/querier_test.go @@ -234,7 +234,7 @@ func mockTSDB(t *testing.T, mint model.Time, samples int, step, chunkOffset time opts := tsdb.DefaultHeadOptions() opts.ChunkDirRoot = dir // We use TSDB head only. By using full TSDB DB, and appending samples to it, closing it would cause unnecessary HEAD compaction, which slows down the test. - head, err := tsdb.NewHead(nil, nil, nil, opts, nil) + head, err := tsdb.NewHead(nil, nil, nil, nil, opts, nil) require.NoError(t, err) t.Cleanup(func() { _ = head.Close() diff --git a/pkg/storegateway/bucket_test.go b/pkg/storegateway/bucket_test.go index b928978588..c0b7cf8c43 100644 --- a/pkg/storegateway/bucket_test.go +++ b/pkg/storegateway/bucket_test.go @@ -1117,7 +1117,7 @@ func uploadTestBlock(t testing.TB, tmpDir string, bkt objstore.Bucket, series in headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = tmpDir headOpts.ChunkRange = 1000 - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) assert.NoError(t, err) defer func() { assert.NoError(t, h.Close()) @@ -1489,7 +1489,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { // This allows to pick time range that will correspond to number of series picked 1:1. { // Block 1. - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) assert.NoError(t, err) defer func() { assert.NoError(t, h.Close()) }() @@ -1528,7 +1528,7 @@ func TestBucketSeries_OneBlock_InMemIndexCacheSegfault(t *testing.T) { var b2 *bucketBlock { // Block 2, do not load this block yet. 
- h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) assert.NoError(t, err) defer func() { assert.NoError(t, h.Close()) }() @@ -1763,7 +1763,7 @@ func TestSeries_BlockWithMultipleChunks(t *testing.T) { headOpts.ChunkDirRoot = filepath.Join(tmpDir, "block") headOpts.ChunkRange = math.MaxInt64 - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) assert.NoError(t, err) defer func() { assert.NoError(t, h.Close()) }() @@ -1913,7 +1913,7 @@ func createBlockWithOneSeriesWithStep(t test.TB, dir string, lbls labels.Labels, headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = dir headOpts.ChunkRange = int64(totalSamples) * step - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) assert.NoError(t, err) defer func() { assert.NoError(t, h.Close()) }() @@ -2564,7 +2564,7 @@ func createHeadWithSeries(t testing.TB, j int, opts headGenOptions) (*tsdb.Head, headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = opts.TSDBDir - h, err := tsdb.NewHead(nil, nil, w, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, w, nil, headOpts, nil) assert.NoError(t, err) app := h.Appender(context.Background()) @@ -2616,7 +2616,7 @@ func createHeadWithSeries(t testing.TB, j int, opts headGenOptions) (*tsdb.Head, } for _, c := range chunkMetas { - chEnc, err := chks.Chunk(c.Ref) + chEnc, err := chks.Chunk(c) assert.NoError(t, err) // Open Chunk. diff --git a/pkg/storegateway/postings_codec_test.go b/pkg/storegateway/postings_codec_test.go index c194e6192c..175ad95cf5 100644 --- a/pkg/storegateway/postings_codec_test.go +++ b/pkg/storegateway/postings_codec_test.go @@ -29,7 +29,7 @@ func TestDiffVarintCodec(t *testing.T) { headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = chunksDir headOpts.ChunkRange = 1000 - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) assert.NoError(t, err) t.Cleanup(func() { assert.NoError(t, h.Close()) diff --git a/pkg/storegateway/testhelper/testhelper.go b/pkg/storegateway/testhelper/testhelper.go index 56f48f3587..49dec39c28 100644 --- a/pkg/storegateway/testhelper/testhelper.go +++ b/pkg/storegateway/testhelper/testhelper.go @@ -52,7 +52,7 @@ func createBlock( headOpts := tsdb.DefaultHeadOptions() headOpts.ChunkDirRoot = filepath.Join(dir, "chunks") headOpts.ChunkRange = math.MaxInt64 - h, err := tsdb.NewHead(nil, nil, nil, headOpts, nil) + h, err := tsdb.NewHead(nil, nil, nil, nil, headOpts, nil) if err != nil { return id, errors.Wrap(err, "create head block") } diff --git a/tools/tsdb-index-health/main.go b/tools/tsdb-index-health/main.go index 66d2a911c2..f0ba1a5f24 100644 --- a/tools/tsdb-index-health/main.go +++ b/tools/tsdb-index-health/main.go @@ -326,7 +326,7 @@ func GatherIndexHealthStats(logger log.Logger, blockDir string, minTime, maxTime func verifyChunks(l log.Logger, cr *chunks.Reader, lset labels.Labels, chks []chunks.Meta) { for _, cm := range chks { - ch, err := cr.Chunk(cm.Ref) + ch, err := cr.Chunk(cm) if err != nil { level.Error(l).Log("msg", "failed to read chunk", "ref", cm.Ref, "err", err) continue diff --git a/tools/tsdb-print-chunk/main.go b/tools/tsdb-print-chunk/main.go index dd366a03dd..65d8d7bc7c 100644 --- a/tools/tsdb-print-chunk/main.go +++ b/tools/tsdb-print-chunk/main.go @@ -43,7 +43,7 @@ func main() { continue } - ch, err := cr.Chunk(chunks.ChunkRef(val)) + ch, err := 
cr.Chunk(chunks.Meta{Ref: chunks.ChunkRef(val)}) if err != nil { fmt.Fprintln(os.Stderr, "Failed to open chunk", val, "due to error:", err) continue diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index ce17803f97..701fb40d24 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -501,9 +501,36 @@ func (c *ScrapeConfig) MarshalYAML() (interface{}, error) { // StorageConfig configures runtime reloadable configuration options. type StorageConfig struct { + TSDBConfig *TSDBConfig `yaml:"tsdb,omitempty"` ExemplarsConfig *ExemplarsConfig `yaml:"exemplars,omitempty"` } +// TSDBConfig configures runtime reloadable configuration options. +type TSDBConfig struct { + // OutOfOrderAllowance sets how long back in time an out-of-order sample can be inserted + // into the TSDB. This is the one finally used by the TSDB and should be in the same unit + // as other timestamps in the TSDB. + OutOfOrderAllowance int64 + + // OutOfOrderAllowanceFlag holds the parsed duration from the config file. + // During unmarshall, this is converted into milliseconds and stored in OutOfOrderAllowance. + // This should not be used directly and must be converted into OutOfOrderAllowance. + OutOfOrderAllowanceFlag model.Duration `yaml:"out_of_order_allowance,omitempty"` +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { + *t = TSDBConfig{} + type plain TSDBConfig + if err := unmarshal((*plain)(t)); err != nil { + return err + } + + t.OutOfOrderAllowance = time.Duration(t.OutOfOrderAllowanceFlag).Milliseconds() + + return nil +} + type TracingClientType string const ( diff --git a/vendor/github.com/prometheus/prometheus/storage/interface.go b/vendor/github.com/prometheus/prometheus/storage/interface.go index b56199f86e..6554b34d50 100644 --- a/vendor/github.com/prometheus/prometheus/storage/interface.go +++ b/vendor/github.com/prometheus/prometheus/storage/interface.go @@ -27,9 +27,10 @@ import ( // The errors exposed. var ( ErrNotFound = errors.New("not found") - ErrOutOfOrderSample = errors.New("out of order sample") - ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") - ErrOutOfBounds = errors.New("out of bounds") + ErrOutOfOrderSample = errors.New("out of order sample") // OOO support disabled and sample is OOO + ErrTooOldSample = errors.New("too old sample") // OOO support enabled, but sample outside of tolerance + ErrDuplicateSampleForTimestamp = errors.New("duplicate sample for timestamp") // WARNING: this is only reported if value differs. equal values get silently dropped + ErrOutOfBounds = errors.New("out of bounds") // OOO support disabled and t < minValidTime ErrOutOfOrderExemplar = errors.New("out of order exemplar") ErrDuplicateExemplar = errors.New("duplicate exemplar") ErrExemplarLabelLength = fmt.Errorf("label length for exemplar exceeds maximum of %d UTF-8 characters", exemplar.ExemplarMaxLabelSetLength) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 9b3c91082d..399a2eed1f 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -127,7 +127,7 @@ type ChunkWriter interface { // ChunkReader provides reading access of serialized time series data. 
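An editorial aside on the TSDBConfig hunk above: a minimal sketch of how the new out_of_order_allowance field is meant to round-trip through UnmarshalYAML, converting the human-readable duration into milliseconds. The yaml.v2 usage and the 30m value are illustrative assumptions, not part of the patch; only config.StorageConfig, TSDBConfig and OutOfOrderAllowance come from the change itself.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/config"
        "gopkg.in/yaml.v2"
    )

    func main() {
        // Hypothetical storage config fragment; only the tsdb block is new in this patch.
        raw := "tsdb:\n  out_of_order_allowance: 30m\n"

        var sc config.StorageConfig
        if err := yaml.Unmarshal([]byte(raw), &sc); err != nil {
            panic(err)
        }
        // UnmarshalYAML stores the parsed duration in OutOfOrderAllowance as milliseconds.
        fmt.Println(sc.TSDBConfig.OutOfOrderAllowance) // 1800000
    }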
type ChunkReader interface { // Chunk returns the series data chunk with the given reference. - Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) + Chunk(ref chunks.Meta) (chunkenc.Chunk, error) // Close releases all underlying resources of the reader. Close() error diff --git a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go index 25f6afaed3..4e9afda615 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/blockwriter.go @@ -71,7 +71,7 @@ func (w *BlockWriter) initHead() error { opts := DefaultHeadOptions() opts.ChunkRange = w.blockSize opts.ChunkDirRoot = w.chunkDir - h, err := NewHead(nil, w.logger, nil, opts, NewHeadStats()) + h, err := NewHead(nil, w.logger, nil, nil, opts, NewHeadStats()) if err != nil { return errors.Wrap(err, "tsdb.NewHead") } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go index bffb7e75ab..c5f8036a71 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/chunk.go @@ -39,6 +39,21 @@ const ( EncXOR ) +// Chunk encodings for out-of-order chunks. +// These encodings must be only used by the Head block for its internal bookkeeping. +const ( + OutOfOrderMask = 0b10000000 + EncOOOXOR = EncXOR | OutOfOrderMask +) + +func IsOutOfOrderChunk(e Encoding) bool { + return (e & OutOfOrderMask) != 0 +} + +func IsValidEncoding(e Encoding) bool { + return e == EncXOR || e == EncOOOXOR +} + // Chunk holds a sequence of sample pairs that can be iterated over and appended to. type Chunk interface { // Bytes returns the underlying byte slice of the chunk. @@ -155,7 +170,7 @@ func NewPool() Pool { func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { switch e { - case EncXOR: + case EncXOR, EncOOOXOR: c := p.xor.Get().(*XORChunk) c.b.stream = b c.b.count = 0 @@ -166,7 +181,7 @@ func (p *pool) Get(e Encoding, b []byte) (Chunk, error) { func (p *pool) Put(c Chunk) error { switch c.Encoding() { - case EncXOR: + case EncXOR, EncOOOXOR: xc, ok := c.(*XORChunk) // This may happen often with wrapped chunks. Nothing we can really do about // it but returning an error would cause a lot of allocations again. Thus, @@ -188,7 +203,7 @@ func (p *pool) Put(c Chunk) error { // bytes. func FromData(e Encoding, d []byte) (Chunk, error) { switch e { - case EncXOR: + case EncXOR, EncOOOXOR: return &XORChunk{b: bstream{count: 0, stream: d}}, nil } return nil, errors.Errorf("invalid chunk encoding %q", e) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/ooo.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/ooo.go new file mode 100644 index 0000000000..90a73327f4 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/ooo.go @@ -0,0 +1,80 @@ +package chunkenc + +import ( + "sort" +) + +type sample struct { + t int64 + v float64 +} + +// OOOChunk maintains samples in time-ascending order. +// Inserts for timestamps already seen, are dropped. +// Samples are stored uncompressed to allow easy sorting. +// Perhaps we can be more efficient later. +type OOOChunk struct { + samples []sample +} + +func NewOOOChunk(capacity int) *OOOChunk { + return &OOOChunk{samples: make([]sample, 0, capacity)} +} + +// Insert inserts the sample such that order is maintained. +// Returns false if insert was not possible due to the same timestamp already existing. 
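Editorial illustration of the out-of-order chunk encoding mask defined earlier in this hunk (EncOOOXOR is EncXOR with the high OutOfOrderMask bit set). All identifiers are from the patch; the program itself is only a sketch.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    func main() {
        // EncOOOXOR = EncXOR | OutOfOrderMask, so the predicates below tell
        // in-order and out-of-order chunks apart by the high bit alone.
        fmt.Println(chunkenc.IsOutOfOrderChunk(chunkenc.EncOOOXOR)) // true
        fmt.Println(chunkenc.IsOutOfOrderChunk(chunkenc.EncXOR))    // false
        fmt.Println(chunkenc.IsValidEncoding(chunkenc.EncOOOXOR))   // true
    }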
+func (o *OOOChunk) Insert(t int64, v float64) bool { + // find index of sample we should replace + i := sort.Search(len(o.samples), func(i int) bool { return o.samples[i].t >= t }) + + if i >= len(o.samples) { + // none found. append it at the end + o.samples = append(o.samples, sample{t, v}) + return true + } + + if o.samples[i].t == t { + return false + } + + // expand length by 1 to make room. use a zero sample, we will overwrite it anyway + o.samples = append(o.samples, sample{}) + copy(o.samples[i+1:], o.samples[i:]) + o.samples[i] = sample{t, v} + + return true +} + +func (o *OOOChunk) NumSamples() int { + return len(o.samples) +} + +func (o *OOOChunk) ToXor() (*XORChunk, error) { + x := NewXORChunk() + app, err := x.Appender() + if err != nil { + return nil, err + } + for _, s := range o.samples { + app.Append(s.t, s.v) + } + return x, nil +} + +func (o *OOOChunk) ToXorBetweenTimestamps(mint, maxt int64) (*XORChunk, error) { + x := NewXORChunk() + app, err := x.Appender() + if err != nil { + return nil, err + } + for _, s := range o.samples { + if s.t < mint { + continue + } + if s.t > maxt { + break + } + app.Append(s.t, s.v) + } + return x, nil +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go index ba00a6e811..716f0698f0 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunkenc/xor.go @@ -457,3 +457,12 @@ func (it *xorIterator) readValue() bool { it.numRead++ return true } + +// OOOXORChunk holds a XORChunk and overrides the Encoding() method. +type OOOXORChunk struct { + *XORChunk +} + +func (c *OOOXORChunk) Encoding() Encoding { + return EncOOOXOR +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go index 72cb0311bc..d77867283e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/chunks.go @@ -120,6 +120,15 @@ type Meta struct { // Time range the data covers. // When MaxTime == math.MaxInt64 the chunk is still open and being appended to. MinTime, MaxTime int64 + + // OOOLastRef, OOOLastMinTime and OOOLastMaxTime are kept as markers for + // overlapping chunks. + // These fields point to the last created out of order Chunk (the head) that existed + // when Series() was called and was overlapping. + // Series() and Chunk() method responses should be consistent for the same + // query even if new data is added in between the calls. + OOOLastRef ChunkRef + OOOLastMinTime, OOOLastMaxTime int64 } // Iterator iterates over the chunks of a single time series. @@ -555,8 +564,8 @@ func (s *Reader) Size() int64 { } // Chunk returns a chunk from a given reference. 
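A short editorial sketch of the OOOChunk contract described above: Insert keeps the buffered samples time-ordered and drops duplicate timestamps, and ToXorBetweenTimestamps re-encodes only a window of them as a compressed XOR chunk. The timestamps and values are made up for illustration.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/tsdb/chunkenc"
    )

    func main() {
        ooo := chunkenc.NewOOOChunk(4)
        fmt.Println(ooo.Insert(2000, 2.0)) // true
        fmt.Println(ooo.Insert(1000, 1.0)) // true, placed before t=2000
        fmt.Println(ooo.Insert(2000, 9.0)) // false, duplicate timestamp is dropped

        // Re-encode only the samples inside [1500, 2500] as an XOR chunk.
        xor, err := ooo.ToXorBetweenTimestamps(1500, 2500)
        if err != nil {
            panic(err)
        }
        fmt.Println(ooo.NumSamples(), xor.NumSamples()) // 2 1
    }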
-func (s *Reader) Chunk(ref ChunkRef) (chunkenc.Chunk, error) { - sgmIndex, chkStart := BlockChunkRef(ref).Unpack() +func (s *Reader) Chunk(meta Meta) (chunkenc.Chunk, error) { + sgmIndex, chkStart := BlockChunkRef(meta.Ref).Unpack() if sgmIndex >= len(s.bs) { return nil, errors.Errorf("segment index %d out of range", sgmIndex) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go index 79c3caf504..7a17bdcb76 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/head_chunks.go @@ -87,6 +87,18 @@ func (ref ChunkDiskMapperRef) Unpack() (seq, offset int) { return seq, offset } +func (ref ChunkDiskMapperRef) GreaterThanOrEqualTo(r ChunkDiskMapperRef) bool { + s1, o1 := ref.Unpack() + s2, o2 := r.Unpack() + return s1 > s2 || (s1 == s2 && o1 >= o2) +} + +func (ref ChunkDiskMapperRef) GreaterThan(r ChunkDiskMapperRef) bool { + s1, o1 := ref.Unpack() + s2, o2 := r.Unpack() + return s1 > s2 || (s1 == s2 && o1 > o2) +} + // CorruptionErr is an error that's returned when corruption is encountered. type CorruptionErr struct { Dir string @@ -859,9 +871,8 @@ func (cdm *ChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, chu return nil } -// Truncate deletes the head chunk files which are strictly below the mint. -// mint should be in milliseconds. -func (cdm *ChunkDiskMapper) Truncate(mint int64) error { +// Truncate deletes the head chunk files whose file number is less than given fileNo. +func (cdm *ChunkDiskMapper) Truncate(fileNo int) error { if !cdm.fileMaxtSet { return errors.New("maxt of the files are not set") } @@ -877,12 +888,10 @@ func (cdm *ChunkDiskMapper) Truncate(mint int64) error { var removedFiles []int for _, seq := range chkFileIndices { - if seq == cdm.curFileSequence || cdm.mmappedChunkFiles[seq].maxt >= mint { + if seq == cdm.curFileSequence || seq >= fileNo { break } - if cdm.mmappedChunkFiles[seq].maxt < mint { - removedFiles = append(removedFiles, seq) - } + removedFiles = append(removedFiles, seq) } cdm.readPathMtx.RUnlock() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/chunks/old_head_chunks.go b/vendor/github.com/prometheus/prometheus/tsdb/chunks/old_head_chunks.go index 5301fd1ace..1722ddd9a3 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/chunks/old_head_chunks.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/chunks/old_head_chunks.go @@ -596,9 +596,8 @@ func (cdm *OldChunkDiskMapper) IterateAllChunks(f func(seriesRef HeadSeriesRef, return nil } -// Truncate deletes the head chunk files which are strictly below the mint. -// mint should be in milliseconds. -func (cdm *OldChunkDiskMapper) Truncate(mint int64) error { +// Truncate deletes the head chunk files whose file number is less than given fileNo. 
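Editorial note on the two ChunkDiskMapperRef helpers added above: a ref packs a (file sequence, offset) pair and the helpers order refs by sequence first, then offset. The bit layout used below (sequence in the upper 32 bits) is an assumption for illustration only; the comparison behaviour follows from the code in this hunk.

    package main

    import (
        "fmt"

        "github.com/prometheus/prometheus/tsdb/chunks"
    )

    func main() {
        // Illustrative refs, assuming sequence in the upper 32 bits and offset in the lower 32.
        a := chunks.ChunkDiskMapperRef(2<<32 | 400)
        b := chunks.ChunkDiskMapperRef(3<<32 | 16)

        fmt.Println(b.GreaterThan(a))          // true: a later file wins regardless of offset
        fmt.Println(a.GreaterThanOrEqualTo(a)) // true: equal refs compare as >=
    }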
+func (cdm *OldChunkDiskMapper) Truncate(fileNo int) error { if !cdm.fileMaxtSet { return errors.New("maxt of the files are not set") } @@ -614,12 +613,10 @@ var removedFiles []int for _, seq := range chkFileIndices { - if seq == cdm.curFileSequence || cdm.mmappedChunkFiles[seq].maxt >= mint { + if seq == cdm.curFileSequence || seq >= fileNo { break } - if cdm.mmappedChunkFiles[seq].maxt < mint { - removedFiles = append(removedFiles, seq) - } + removedFiles = append(removedFiles, seq) } cdm.readPathMtx.RUnlock() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 3e0bed0493..80d3f62536 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -75,6 +75,10 @@ type Compactor interface { // * The source dirs are marked Deletable. // * Returns empty ulid.ULID{}. Compact(dest string, dirs []string, open []*Block) (ulid.ULID, error) + + // CompactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given. + // Each ULID in the result corresponds to a block in a unique time range. + CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) } // LeveledCompactor implements the Compactor interface. @@ -546,6 +550,131 @@ func (c *LeveledCompactor) compact(dest string, dirs []string, open []*Block, sh return nil, errs.Err() } +// CompactOOOWithSplitting splits the input OOO Head into shardCount number of output blocks +// per possible block range, and returns a slice of block IDs. In result[i][j], +// 'i' corresponds to a single time range of blocks while 'j' corresponds to the shard index. +// If a given output block has no series, the corresponding block ID will be the zero ULID value. +// TODO: write tests for this. +func (c *LeveledCompactor) CompactOOOWithSplitting(dest string, oooHead *OOOCompactionHead, shardCount uint64) (result [][]ulid.ULID, _ error) { + return c.compactOOO(dest, oooHead, shardCount) +} + +// CompactOOO creates a new block per possible block range in the compactor's directory from the OOO Head given. +// Each ULID in the result corresponds to a block in a unique time range. +func (c *LeveledCompactor) CompactOOO(dest string, oooHead *OOOCompactionHead) (result []ulid.ULID, err error) { + ulids, err := c.compactOOO(dest, oooHead, 1) + if err != nil { + return nil, err + } + for _, s := range ulids { + if s[0].Compare(ulid.ULID{}) != 0 { + result = append(result, s[0]) + } + } + return result, err +} + +func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, shardCount uint64) (_ [][]ulid.ULID, err error) { + if shardCount == 0 { + shardCount = 1 + } + + start := time.Now() + + if err != nil { + return nil, err + } + + // The first dimension of outBlocks determines the time-based splitting (i.e. outBlocks[i] has blocks all for the same time range). + // The second dimension of outBlocks determines the label-based shard (i.e. outBlocks[i][j] is the (j+1)th shard). + // During ingestion of samples we can identify which ooo blocks will exist so that + // we don't have to prefill symbols etc. for the blocks that will be empty. + // With this, len(outBlocks[x]) will still be the same for all x so that we can pick blocks easily. + // That said, only some of the outBlocks[x][y] will be valid and populated based on preexisting knowledge of + // which blocks to expect.
+ // In case we see a sample that is not present in the estimated block ranges, we will create them on flight. + outBlocks := make([][]shardedBlock, 0) + outBlocksTime := ulid.Now() // Make all out blocks share the same timestamp in the ULID. + blockSize := oooHead.ChunkRange() + oooHeadMint, oooHeadMaxt := oooHead.MinTime(), oooHead.MaxTime() + ulids := make([][]ulid.ULID, 0) + for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t = t + blockSize { + mint, maxt := t, t+blockSize + + outBlocks = append(outBlocks, make([]shardedBlock, shardCount)) + ulids = append(ulids, make([]ulid.ULID, shardCount)) + ix := len(outBlocks) - 1 + + for jx := range outBlocks[ix] { + uid := ulid.MustNew(outBlocksTime, rand.Reader) + meta := &BlockMeta{ + ULID: uid, + MinTime: mint, + MaxTime: maxt, + } + meta.Compaction.Level = 1 + meta.Compaction.Sources = []ulid.ULID{uid} + + outBlocks[ix][jx] = shardedBlock{ + meta: meta, + } + ulids[ix][jx] = meta.ULID + } + + // Block intervals are half-open: [b.MinTime, b.MaxTime). Block intervals are always +1 than the total samples it includes. + err := c.write(dest, outBlocks[ix], oooHead.CloneForTimeRange(mint, maxt-1)) + if err != nil { + // We need to delete all blocks in case there was an error. + for _, obs := range outBlocks { + for _, ob := range obs { + if ob.tmpDir != "" { + if removeErr := os.RemoveAll(ob.tmpDir); removeErr != nil { + level.Error(c.logger).Log("msg", "Failed to remove temp folder after failed compaction", "dir", ob.tmpDir, "err", removeErr.Error()) + } + } + if ob.blockDir != "" { + if removeErr := os.RemoveAll(ob.blockDir); removeErr != nil { + level.Error(c.logger).Log("msg", "Failed to remove block folder after failed compaction", "dir", ob.blockDir, "err", removeErr.Error()) + } + } + } + } + return nil, err + } + } + + noOOOBlock := true + for ix, obs := range outBlocks { + for jx := range obs { + meta := outBlocks[ix][jx].meta + if meta.Stats.NumSamples != 0 { + noOOOBlock = false + level.Info(c.logger).Log( + "msg", "compact ooo head", + "mint", meta.MinTime, + "maxt", meta.MaxTime, + "ulid", meta.ULID, + "duration", time.Since(start), + "shard", fmt.Sprintf("%d_of_%d", jx+1, shardCount), + ) + } else { + // This block did not get any data. So clear out the ulid to signal this. + ulids[ix][jx] = ulid.ULID{} + } + } + } + + if noOOOBlock { + level.Info(c.logger).Log( + "msg", "compact ooo head resulted in no blocks", + "duration", time.Since(start), + ) + return nil, nil + } + + return ulids, nil +} + func (c *LeveledCompactor) Write(dest string, b BlockReader, mint, maxt int64, parent *BlockMeta) (ulid.ULID, error) { start := time.Now() diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 66b0758bbb..a1177df0d1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -33,6 +33,7 @@ import ( "github.com/oklog/ulid" "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" + "go.uber.org/atomic" "golang.org/x/sync/errgroup" "github.com/prometheus/prometheus/config" @@ -85,6 +86,8 @@ func DefaultOptions() *Options { IsolationDisabled: defaultIsolationDisabled, HeadChunksEndTimeVariance: 0, HeadChunksWriteQueueSize: chunks.DefaultWriteQueueSize, + OutOfOrderCapMin: DefaultOutOfOrderCapMin, + OutOfOrderCapMax: DefaultOutOfOrderCapMax, } } @@ -119,6 +122,7 @@ type Options struct { // Querying on overlapping blocks are allowed if AllowOverlappingQueries is true. 
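An editorial aside on the compactOOO loop above: a worked sketch of how the loop aligns the OOO head's time range to block boundaries, emitting one half-open block interval per iteration. The 2h block size and the millisecond timestamps are illustrative values, not taken from the patch.

    package main

    import "fmt"

    func main() {
        const blockSize = int64(2 * 60 * 60 * 1000) // 2h in milliseconds
        oooHeadMint, oooHeadMaxt := int64(9_900_000), int64(16_000_000)

        // Same iteration shape as compactOOO: start at the aligned block containing
        // mint and emit one [mint, maxt) range per block until maxt is covered.
        for t := blockSize * (oooHeadMint / blockSize); t <= oooHeadMaxt; t += blockSize {
            fmt.Printf("block [%d, %d)\n", t, t+blockSize)
        }
        // Output:
        // block [7200000, 14400000)
        // block [14400000, 21600000)
    }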
// Since querying is a required operation for TSDB, if there are going to be // overlapping blocks, then this should be set to true. + // NOTE: Do not use this directly in DB. Use it via DB.AllowOverlappingQueries(). AllowOverlappingQueries bool // Compaction of overlapping blocks are allowed if AllowOverlappingCompaction is true. @@ -178,6 +182,19 @@ type Options struct { // If nil, the cache won't be used. SeriesHashCache *hashcache.SeriesHashCache + // OutOfOrderAllowance specifies how much out of order is allowed, if any. + // This can change during run-time, so this value from here should only be used + // while initialising. + OutOfOrderAllowance int64 + + // OutOfOrderCapMin minimum capacity for OOO chunks (in samples). + // If it is <=0, the default value is assumed. + OutOfOrderCapMin int64 + + // OutOfOrderCapMax is maximum capacity for OOO chunks (in samples). + // If it is <=0, the default value is assumed. + OutOfOrderCapMax int64 + // Temporary flag which we use to select whether we want to use the new or the old chunk disk mapper. NewChunkDiskMapper bool } @@ -217,6 +234,13 @@ type DB struct { // Cancel a running compaction when a shutdown is initiated. compactCancel context.CancelFunc + + // oooWasEnabled is true if out of order support was enabled at least one time + // during the time TSDB was up. In which case we need to keep supporting + // out-of-order compaction and vertical queries. + oooWasEnabled atomic.Bool + + registerer prometheus.Registerer } type dbMetrics struct { @@ -392,9 +416,17 @@ func (db *DBReadOnly) FlushWAL(dir string) (returnErr error) { if err != nil { return err } + var wbl *wal.WAL + wblDir := filepath.Join(db.dir, wal.WblDirName) + if _, err := os.Stat(wblDir); !os.IsNotExist(err) { + wbl, err = wal.Open(db.logger, wblDir) + if err != nil { + return err + } + } opts := DefaultHeadOptions() opts.ChunkDirRoot = db.dir - head, err := NewHead(nil, db.logger, w, opts, NewHeadStats()) + head, err := NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) if err != nil { return err } @@ -451,7 +483,7 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue opts := DefaultHeadOptions() opts.ChunkDirRoot = db.dir - head, err := NewHead(nil, db.logger, nil, opts, NewHeadStats()) + head, err := NewHead(nil, db.logger, nil, nil, opts, NewHeadStats()) if err != nil { return nil, err } @@ -469,9 +501,17 @@ func (db *DBReadOnly) loadDataAsQueryable(maxt int64) (storage.SampleAndChunkQue if err != nil { return nil, err } + var wbl *wal.WAL + wblDir := filepath.Join(db.dir, wal.WblDirName) + if _, err := os.Stat(wblDir); !os.IsNotExist(err) { + wbl, err = wal.Open(db.logger, wblDir) + if err != nil { + return nil, err + } + } opts := DefaultHeadOptions() opts.ChunkDirRoot = db.dir - head, err = NewHead(nil, db.logger, w, opts, NewHeadStats()) + head, err = NewHead(nil, db.logger, w, wbl, opts, NewHeadStats()) if err != nil { return nil, err } @@ -622,6 +662,18 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.MinBlockDuration > opts.MaxBlockDuration { opts.MaxBlockDuration = opts.MinBlockDuration } + if opts.OutOfOrderAllowance > 0 { + opts.AllowOverlappingQueries = true + } + if opts.OutOfOrderCapMin <= 0 { + opts.OutOfOrderCapMin = DefaultOutOfOrderCapMin + } + if opts.OutOfOrderCapMax <= 0 { + opts.OutOfOrderCapMax = DefaultOutOfOrderCapMax + } + if opts.OutOfOrderAllowance < 0 { + opts.OutOfOrderAllowance = 0 + } if len(rngs) == 0 { // Start with smallest block duration and create exponential buckets 
until the exceed the @@ -658,6 +710,15 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } walDir := filepath.Join(dir, "wal") + wblDir := filepath.Join(dir, wal.WblDirName) + // TODO(jesus.vazquez) Remove the block of code below, only necessary until all ooo_wbl dirs in prod have been replaced with wbl + oldWblDir := filepath.Join(dir, "ooo_wbl") + if _, err := os.Stat(oldWblDir); err == nil { + err = fileutil.Rename(oldWblDir, wblDir) + if err != nil { + return nil, errors.Wrap(err, "failed to move old wbl dir to new wbl dir") + } + } // Migrate old WAL if one exists. if err := MigrateWAL(l, walDir); err != nil { @@ -680,6 +741,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs autoCompact: true, chunkPool: chunkenc.NewPool(), blocksToDelete: opts.BlocksToDelete, + registerer: r, } defer func() { // Close files if startup fails somewhere. @@ -718,7 +780,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } db.compactCancel = cancel - var wlog *wal.WAL + var wlog, wblog *wal.WAL segmentSize := wal.DefaultSegmentSize // Wal is enabled. if opts.WALSegmentSize >= 0 { @@ -730,8 +792,14 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if err != nil { return nil, err } + if opts.OutOfOrderAllowance > 0 { + wblog, err = wal.NewSize(l, r, wblDir, segmentSize, opts.WALCompression) + if err != nil { + return nil, err + } + } } - + db.oooWasEnabled.Store(opts.OutOfOrderAllowance > 0) headOpts := DefaultHeadOptions() headOpts.ChunkRange = rngs[0] headOpts.ChunkDirRoot = dir @@ -744,12 +812,15 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.EnableExemplarStorage = opts.EnableExemplarStorage headOpts.MaxExemplars.Store(opts.MaxExemplars) headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown + headOpts.OutOfOrderAllowance.Store(opts.OutOfOrderAllowance) + headOpts.OutOfOrderCapMin.Store(opts.OutOfOrderCapMin) + headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax) headOpts.NewChunkDiskMapper = opts.NewChunkDiskMapper if opts.IsolationDisabled { // We only override this flag if isolation is disabled at DB level. We use the default otherwise. 
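A brief editorial sketch of the new Options knobs introduced above and how validateOpts treats them; the concrete values are illustrative, while the field names and defaults come from the patch.

    package main

    import "github.com/prometheus/prometheus/tsdb"

    func main() {
        opts := tsdb.DefaultOptions()
        // A positive allowance (in milliseconds) opts the DB into out-of-order
        // ingestion; validateOpts then forces AllowOverlappingQueries to true.
        opts.OutOfOrderAllowance = 30 * 60 * 1000
        // Zero or negative capacities fall back to DefaultOutOfOrderCapMin/Max (4 and 32).
        opts.OutOfOrderCapMin = 0
        opts.OutOfOrderCapMax = 0
        _ = opts
    }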
headOpts.IsolationDisabled = opts.IsolationDisabled } - db.head, err = NewHead(r, l, wlog, headOpts, stats.Head) + db.head, err = NewHead(r, l, wlog, wblog, headOpts, stats.Head) if err != nil { return nil, err } @@ -775,10 +846,19 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if initErr := db.head.Init(minValidTime); initErr != nil { db.head.metrics.walCorruptionsTotal.Inc() - level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) - if err := wlog.Repair(initErr); err != nil { - return nil, errors.Wrap(err, "repair corrupted WAL") + isOOOErr := isErrLoadOOOWal(initErr) + if isOOOErr { + level.Warn(db.logger).Log("msg", "Encountered OOO WAL read error, attempting repair", "err", initErr) + if err := wblog.Repair(initErr); err != nil { + return nil, errors.Wrap(err, "repair corrupted OOO WAL") + } + } else { + level.Warn(db.logger).Log("msg", "Encountered WAL read error, attempting repair", "err", initErr) + if err := wlog.Repair(initErr); err != nil { + return nil, errors.Wrap(err, "repair corrupted WAL") + } } + } go db.run() @@ -872,8 +952,51 @@ func (db *DB) Appender(ctx context.Context) storage.Appender { return dbAppender{db: db, Appender: db.head.Appender(ctx)} } +// ApplyConfig applies a new config to the DB. +// Behaviour of 'OutOfOrderAllowance' is as follows: +// OOO enabled = oooAllowance > 0. OOO disabled = oooAllowance is 0. +// 1) Before: OOO disabled, Now: OOO enabled => +// * A new WBL is created for the head block. +// * OOO compaction is enabled. +// * Overlapping queries are enabled. +// 2) Before: OOO enabled, Now: OOO enabled => +// * Only the allowance is updated. +// 3) Before: OOO enabled, Now: OOO disabled => +// * Allowance set to 0. So no new OOO samples will be allowed. +// * OOO WBL will stay and follow the usual cleanup until a restart. +// * OOO Compaction and overlapping queries will remain enabled until a restart. +// 4) Before: OOO disabled, Now: OOO disabled => no-op. func (db *DB) ApplyConfig(conf *config.Config) error { - return db.head.ApplyConfig(conf) + oooAllowance := int64(0) + if conf.StorageConfig.TSDBConfig != nil { + oooAllowance = conf.StorageConfig.TSDBConfig.OutOfOrderAllowance + } + if oooAllowance < 0 { + oooAllowance = 0 + } + + // Create WBL if it was not present and if OOO is enabled with WAL enabled. + var wblog *wal.WAL + var err error + if !db.oooWasEnabled.Load() && oooAllowance > 0 && db.opts.WALSegmentSize >= 0 { + segmentSize := wal.DefaultSegmentSize + // Wal is set to a custom size. + if db.opts.WALSegmentSize > 0 { + segmentSize = db.opts.WALSegmentSize + } + oooWalDir := filepath.Join(db.dir, wal.WblDirName) + wblog, err = wal.NewSize(db.logger, db.registerer, oooWalDir, segmentSize, db.opts.WALCompression) + if err != nil { + return err + } + } + + db.head.ApplyConfig(conf, wblog) + + if !db.oooWasEnabled.Load() { + db.oooWasEnabled.Store(oooAllowance > 0) + } + return nil } // dbAppender wraps the DB's head appender and triggers compactions on commit @@ -949,6 +1072,9 @@ func (db *DB) Compact() (returnErr error) { // so in order to make sure that overlaps are evaluated // consistently, we explicitly remove the last value // from the block interval here. + // TODO(jesus.vazquez) Once we have the OOORangeHead we need to update + // TODO(jesus.vazquez) this method to accept a second parameter with an OOORangeHead to + // TODO(jesus.vazquez) compact the OOO Samples. 
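Editorial sketch of how the DB.ApplyConfig transition table earlier in this hunk would be exercised at runtime: applying a config with a non-zero allowance to an already-open DB is case 1 (a WBL is created, OOO compaction and overlapping queries are enabled). The enableOOO helper and the 30-minute allowance are illustrative assumptions; only ApplyConfig and the config fields come from the patch.

    package main

    import (
        "log"
        "time"

        "github.com/prometheus/prometheus/config"
        "github.com/prometheus/prometheus/tsdb"
    )

    // enableOOO applies a storage config with a non-zero out-of-order allowance
    // to an already-open DB (illustrative helper, not part of the patch).
    func enableOOO(db *tsdb.DB) error {
        cfg := &config.Config{
            StorageConfig: config.StorageConfig{
                TSDBConfig: &config.TSDBConfig{
                    OutOfOrderAllowance: (30 * time.Minute).Milliseconds(),
                },
            },
        }
        return db.ApplyConfig(cfg)
    }

    func main() {
        var db *tsdb.DB // placeholder: opening the DB is out of scope for this sketch
        if db != nil {
            if err := enableOOO(db); err != nil {
                log.Fatal(err)
            }
        }
    }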
if err := db.compactHead(NewRangeHead(db.head, mint, maxt-1)); err != nil { return errors.Wrap(err, "compact head") } @@ -970,6 +1096,14 @@ func (db *DB) Compact() (returnErr error) { "block_range", db.head.chunkRange.Load(), ) } + + if lastBlockMaxt != math.MinInt64 { + // The head was compacted, so we compact OOO head as well. + if err := db.compactOOOHead(); err != nil { + return errors.Wrap(err, "compact ooo head") + } + } + return db.compactBlocks() } @@ -988,6 +1122,47 @@ func (db *DB) CompactHead(head *RangeHead) error { return nil } +// CompactOOOHead compacts the OOO Head. +func (db *DB) CompactOOOHead() error { + db.cmtx.Lock() + defer db.cmtx.Unlock() + + return db.compactOOOHead() +} + +func (db *DB) compactOOOHead() error { + if !db.oooWasEnabled.Load() { + return nil + } + oooHead, err := NewOOOCompactionHead(db.head) + if err != nil { + return errors.Wrap(err, "get ooo compaction head") + } + + ulids, err := db.compactor.CompactOOO(db.dir, oooHead) + if err != nil { + return errors.Wrap(err, "compact ooo head") + } + if err := db.reloadBlocks(); err != nil { + errs := tsdb_errors.NewMulti(err) + for _, uid := range ulids { + if errRemoveAll := os.RemoveAll(filepath.Join(db.dir, uid.String())); errRemoveAll != nil { + errs.Add(errRemoveAll) + } + } + return errors.Wrap(errs.Err(), "reloadBlocks blocks after failed compact ooo head") + } + + lastWBLFile, minOOOMmapRef := oooHead.LastWBLFile(), oooHead.LastMmapRef() + if lastWBLFile != 0 || minOOOMmapRef != 0 { + if err := db.head.truncateOOO(lastWBLFile, minOOOMmapRef); err != nil { + return errors.Wrap(err, "truncate ooo wbl") + } + } + + return nil +} + // compactHead compacts the given RangeHead. // The compaction mutex should be held before calling this method. func (db *DB) compactHead(head *RangeHead) error { @@ -1145,7 +1320,7 @@ func (db *DB) reloadBlocks() (err error) { sort.Slice(toLoad, func(i, j int) bool { return toLoad[i].Meta().MinTime < toLoad[j].Meta().MinTime }) - if !db.opts.AllowOverlappingQueries { + if !db.AllowOverlappingQueries() { if err := validateBlockSequence(toLoad); err != nil { return errors.Wrap(err, "invalid block sequence") } @@ -1175,6 +1350,10 @@ func (db *DB) reloadBlocks() (err error) { return nil } +func (db *DB) AllowOverlappingQueries() bool { + return db.opts.AllowOverlappingQueries || db.oooWasEnabled.Load() +} + func openBlocks(l log.Logger, dir string, loaded []*Block, chunkPool chunkenc.Pool, cache *hashcache.SeriesHashCache) (blocks []*Block, corrupted map[ulid.ULID]error, err error) { bDirs, err := blockDirs(dir) if err != nil { @@ -1555,13 +1734,13 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err blocks = append(blocks, b) } } - var headQuerier storage.Querier + var inOrderHeadQuerier storage.Querier if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - headQuerier, err = NewBlockQuerier(rh, mint, maxt) + inOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head %s", rh) + return nil, errors.Wrapf(err, "open block querier for head %s", rh) } // Getting the querier above registers itself in the queue that the truncation waits on. @@ -1569,20 +1748,30 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := headQuerier.Close(); err != nil { - return nil, errors.Wrapf(err, "closing head querier %s", rh) + if err := inOrderHeadQuerier.Close(); err != nil { + return nil, errors.Wrapf(err, "closing head block querier %s", rh) } - headQuerier = nil + inOrderHeadQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - headQuerier, err = NewBlockQuerier(rh, newMint, maxt) + inOrderHeadQuerier, err = NewBlockQuerier(rh, newMint, maxt) if err != nil { - return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) + return nil, errors.Wrapf(err, "open block querier for head while getting new querier %s", rh) } } } + var outOfOrderHeadQuerier storage.Querier + if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + rh := NewOOORangeHead(db.head, mint, maxt) + var err error + outOfOrderHeadQuerier, err = NewBlockQuerier(rh, mint, maxt) + if err != nil { + return nil, errors.Wrapf(err, "open block querier for ooo head %s", rh) + } + } + blockQueriers := make([]storage.Querier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockQuerier(b, mint, maxt) @@ -1597,8 +1786,11 @@ func (db *DB) Querier(_ context.Context, mint, maxt int64) (storage.Querier, err } return nil, errors.Wrapf(err, "open querier for block %s", b) } - if headQuerier != nil { - blockQueriers = append(blockQueriers, headQuerier) + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } + if outOfOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } return storage.NewMergeQuerier(blockQueriers, nil, storage.ChainedSeriesMerge), nil } @@ -1615,11 +1807,11 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu blocks = append(blocks, b) } } - var headQuerier storage.ChunkQuerier + var inOrderHeadQuerier storage.ChunkQuerier if maxt >= db.head.MinTime() { rh := NewRangeHead(db.head, mint, maxt) var err error - headQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) if err != nil { return nil, errors.Wrapf(err, "open querier for head %s", rh) } @@ -1629,20 +1821,30 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu // won't run into a race later since any truncation that comes after will wait on this querier if it overlaps. 
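An editorial restatement of the overlap test that gates the new out-of-order head querier above: the OOORangeHead querier is only added when the requested [mint, maxt] range overlaps the head's [MinOOOTime, MaxOOOTime] range. The helper below assumes overlapsClosedInterval has the usual closed-interval semantics; it is a sketch, not the vendored implementation.

    package main

    import "fmt"

    // overlapsClosedInterval mirrors the check used by Querier and ChunkQuerier
    // above (editorial restatement under the closed-interval assumption).
    func overlapsClosedInterval(mint1, maxt1, mint2, maxt2 int64) bool {
        return mint1 <= maxt2 && mint2 <= maxt1
    }

    func main() {
        // The query range [1000, 2000] overlaps the OOO range [1500, 5000],
        // so an OOO head querier would be appended alongside the in-order one.
        fmt.Println(overlapsClosedInterval(1000, 2000, 1500, 5000)) // true
    }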
shouldClose, getNew, newMint := db.head.IsQuerierCollidingWithTruncation(mint, maxt) if shouldClose { - if err := headQuerier.Close(); err != nil { + if err := inOrderHeadQuerier.Close(); err != nil { return nil, errors.Wrapf(err, "closing head querier %s", rh) } - headQuerier = nil + inOrderHeadQuerier = nil } if getNew { rh := NewRangeHead(db.head, newMint, maxt) - headQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) + inOrderHeadQuerier, err = NewBlockChunkQuerier(rh, newMint, maxt) if err != nil { return nil, errors.Wrapf(err, "open querier for head while getting new querier %s", rh) } } } + var outOfOrderHeadQuerier storage.ChunkQuerier + if overlapsClosedInterval(mint, maxt, db.head.MinOOOTime(), db.head.MaxOOOTime()) { + rh := NewOOORangeHead(db.head, mint, maxt) + var err error + outOfOrderHeadQuerier, err = NewBlockChunkQuerier(rh, mint, maxt) + if err != nil { + return nil, errors.Wrapf(err, "open block chunk querier for ooo head %s", rh) + } + } + blockQueriers := make([]storage.ChunkQuerier, 0, len(blocks)) for _, b := range blocks { q, err := NewBlockChunkQuerier(b, mint, maxt) @@ -1657,8 +1859,11 @@ func (db *DB) ChunkQuerier(_ context.Context, mint, maxt int64) (storage.ChunkQu } return nil, errors.Wrapf(err, "open querier for block %s", b) } - if headQuerier != nil { - blockQueriers = append(blockQueriers, headQuerier) + if inOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, inOrderHeadQuerier) + } + if outOfOrderHeadQuerier != nil { + blockQueriers = append(blockQueriers, outOfOrderHeadQuerier) } return storage.NewMergeChunkQuerier(blockQueriers, nil, storage.NewCompactingChunkSeriesMerger(storage.ChainedSeriesMerge)), nil diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index 038a637d92..aef7dde308 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -25,9 +25,10 @@ import ( "github.com/go-kit/log/level" "github.com/oklog/ulid" "github.com/pkg/errors" - "github.com/prometheus/client_golang/prometheus" "go.uber.org/atomic" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/prometheus/config" "github.com/prometheus/prometheus/model/exemplar" "github.com/prometheus/prometheus/model/labels" @@ -62,7 +63,7 @@ var ( type chunkDiskMapper interface { CutNewFile() (returnErr error) IterateAllChunks(f func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error) (err error) - Truncate(mint int64) error + Truncate(fileNo int) error DeleteCorrupted(originalErr error) error Size() (int64, error) Close() error @@ -75,15 +76,19 @@ type chunkDiskMapper interface { type Head struct { chunkRange atomic.Int64 numSeries atomic.Uint64 - minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. + minOOOTime, maxOOOTime atomic.Int64 // TODO(jesus) These should be updated after garbage collection + minTime, maxTime atomic.Int64 // Current min and max of the samples included in the head. // TODO(jesus.vazquez) Ensure these are properly tracked. minValidTime atomic.Int64 // Mint allowed to be added to the head. It shouldn't be lower than the maxt of the last persisted block. lastWALTruncationTime atomic.Int64 lastMemoryTruncationTime atomic.Int64 lastSeriesID atomic.Uint64 + // All the ooo m-map chunks should be after this. This is used to truncate old ooo m-map chunks. 
+ // This should be typecast to chunks.ChunkDiskMapperRef after loading. + minOOOMmapRef atomic.Uint64 metrics *headMetrics opts *HeadOptions - wal *wal.WAL + wal, wbl *wal.WAL exemplarMetrics *ExemplarMetrics exemplars ExemplarStorage logger log.Logger @@ -99,6 +104,7 @@ type Head struct { deletedMtx sync.Mutex deleted map[chunks.HeadSeriesRef]int // Deleted series, and what WAL segment they must be kept until. + // TODO(ganesh) extend MemPostings to return only OOOPostings, Set OOOStatus, ... Like an additional map of ooo postings. postings *index.MemPostings // Postings lists for terms. pfmc *PostingsForMatchersCache @@ -144,6 +150,9 @@ type HeadOptions struct { ChunkWriteBufferSize int ChunkEndTimeVariance float64 ChunkWriteQueueSize int + OutOfOrderAllowance atomic.Int64 + OutOfOrderCapMin atomic.Int64 + OutOfOrderCapMax atomic.Int64 // StripeSize sets the number of entries in the hash map, it must be a power of 2. // A larger StripeSize will allocate more memory up-front, but will increase performance when handling a large number of series. @@ -160,8 +169,13 @@ type HeadOptions struct { NewChunkDiskMapper bool } +const ( + DefaultOutOfOrderCapMin int64 = 4 + DefaultOutOfOrderCapMax int64 = 32 +) + func DefaultHeadOptions() *HeadOptions { - return &HeadOptions{ + ho := &HeadOptions{ ChunkRange: DefaultBlockDuration, ChunkDirRoot: "", ChunkPool: chunkenc.NewPool(), @@ -173,6 +187,9 @@ func DefaultHeadOptions() *HeadOptions { IsolationDisabled: defaultIsolationDisabled, NewChunkDiskMapper: false, } + ho.OutOfOrderCapMin.Store(DefaultOutOfOrderCapMin) + ho.OutOfOrderCapMax.Store(DefaultOutOfOrderCapMax) + return ho } // SeriesLifecycleCallback specifies a list of callbacks that will be called during a lifecycle of a series. @@ -191,11 +208,32 @@ type SeriesLifecycleCallback interface { } // NewHead opens the head block in dir. -func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { +func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wal.WAL, opts *HeadOptions, stats *HeadStats) (*Head, error) { var err error if l == nil { l = log.NewNopLogger() } + + if opts.OutOfOrderAllowance.Load() < 0 { + opts.OutOfOrderAllowance.Store(0) + } + + // Allowance can be set at runtime. So the capMin and capMax should be valid + // even if ooo is not enabled yet. + capMin, capMax := opts.OutOfOrderCapMin.Load(), opts.OutOfOrderCapMax.Load() + if capMin > 255 { + return nil, errors.Errorf("OOOCapMin invalid %d. must be <= 255", capMin) + } + if capMax > 255 { + return nil, errors.Errorf("OOOCapMax invalid %d. must be <= 255", capMax) + } + if capMin < 0 { + return nil, errors.Errorf("OOOCapMin invalid %d. must be >= 0", capMin) + } + if capMax <= 0 || capMax < capMin { + return nil, errors.Errorf("OOOCapMax invalid %d. 
must be > 0 and >= OOOCapMin", capMax) + } + if opts.ChunkRange < 1 { return nil, errors.Errorf("invalid chunk range %d", opts.ChunkRange) } @@ -213,6 +251,7 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal *wal.WAL, opts *HeadOpti h := &Head{ wal: wal, + wbl: wbl, logger: l, opts: opts, memChunkPool: sync.Pool{ @@ -284,36 +323,40 @@ func (h *Head) resetInMemoryState() error { h.chunkRange.Store(h.opts.ChunkRange) h.minTime.Store(math.MaxInt64) h.maxTime.Store(math.MinInt64) + h.minOOOTime.Store(math.MaxInt64) + h.maxOOOTime.Store(math.MinInt64) h.lastWALTruncationTime.Store(math.MinInt64) h.lastMemoryTruncationTime.Store(math.MinInt64) return nil } type headMetrics struct { - activeAppenders prometheus.Gauge - series prometheus.GaugeFunc - seriesCreated prometheus.Counter - seriesRemoved prometheus.Counter - seriesNotFound prometheus.Counter - chunks prometheus.Gauge - chunksCreated prometheus.Counter - chunksRemoved prometheus.Counter - gcDuration prometheus.Summary - samplesAppended prometheus.Counter - outOfBoundSamples prometheus.Counter - outOfOrderSamples prometheus.Counter - walTruncateDuration prometheus.Summary - walCorruptionsTotal prometheus.Counter - walTotalReplayDuration prometheus.Gauge - headTruncateFail prometheus.Counter - headTruncateTotal prometheus.Counter - checkpointDeleteFail prometheus.Counter - checkpointDeleteTotal prometheus.Counter - checkpointCreationFail prometheus.Counter - checkpointCreationTotal prometheus.Counter - mmapChunkCorruptionTotal prometheus.Counter - snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. - oooHistogram prometheus.Histogram + activeAppenders prometheus.Gauge + series prometheus.GaugeFunc + seriesCreated prometheus.Counter + seriesRemoved prometheus.Counter + seriesNotFound prometheus.Counter + chunks prometheus.Gauge + chunksCreated prometheus.Counter + chunksRemoved prometheus.Counter + gcDuration prometheus.Summary + samplesAppended prometheus.Counter + outOfOrderSamplesAppended prometheus.Counter + outOfBoundSamples prometheus.Counter + outOfOrderSamples prometheus.Counter + tooOldSamples prometheus.Counter + walTruncateDuration prometheus.Summary + walCorruptionsTotal prometheus.Counter + dataTotalReplayDuration prometheus.Gauge + headTruncateFail prometheus.Counter + headTruncateTotal prometheus.Counter + checkpointDeleteFail prometheus.Counter + checkpointDeleteTotal prometheus.Counter + checkpointCreationFail prometheus.Counter + checkpointCreationTotal prometheus.Counter + mmapChunkCorruptionTotal prometheus.Counter + snapshotReplayErrorTotal prometheus.Counter // Will be either 0 or 1. 
+ oooHistogram prometheus.Histogram } func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { @@ -364,7 +407,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_wal_corruptions_total", Help: "Total number of WAL corruptions.", }), - walTotalReplayDuration: prometheus.NewGauge(prometheus.GaugeOpts{ + dataTotalReplayDuration: prometheus.NewGauge(prometheus.GaugeOpts{ Name: "prometheus_tsdb_data_replay_duration_seconds", Help: "Time taken to replay the data on disk.", }), @@ -372,13 +415,21 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { Name: "prometheus_tsdb_head_samples_appended_total", Help: "Total number of appended samples.", }), + outOfOrderSamplesAppended: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_out_of_order_samples_appended_total", + Help: "Total number of appended out of order samples.", + }), outOfBoundSamples: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_out_of_bound_samples_total", - Help: "Total number of out of bound samples ingestion failed attempts.", + Help: "Total number of out of bound samples ingestion failed attempts with out of order support disabled.", }), outOfOrderSamples: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_out_of_order_samples_total", - Help: "Total number of out of order samples ingestion failed attempts.", + Help: "Total number of out of order samples ingestion failed attempts due to out of order being disabled.", + }), + tooOldSamples: prometheus.NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_too_old_samples_total", + Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of allowance.", }), headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_truncations_failed_total", @@ -414,7 +465,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }), oooHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_sample_ooo_delta", - Help: "Delta in seconds by which a sample is considered out of order.", + Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO allowance and whether sample is accepted or not).", Buckets: []float64{ // Note that mimir distributor only gives us a range of wallclock-12h to wallclock+15min 60 * 10, // 10 min @@ -441,10 +492,12 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { m.gcDuration, m.walTruncateDuration, m.walCorruptionsTotal, - m.walTotalReplayDuration, + m.dataTotalReplayDuration, m.samplesAppended, + m.outOfOrderSamplesAppended, m.outOfBoundSamples, m.outOfOrderSamples, + m.tooOldSamples, m.headTruncateFail, m.headTruncateTotal, m.checkpointDeleteFail, @@ -563,8 +616,9 @@ func (h *Head) Init(minValidTime int64) error { } mmapChunkReplayStart := time.Now() - mmappedChunks, err := h.loadMmappedChunks(refSeries) + mmappedChunks, oooMmappedChunks, lastMmapRef, err := h.loadMmappedChunks(refSeries) if err != nil { + // TODO(codesome): clear out all m-map chunks here for refSeries. level.Error(h.logger).Log("msg", "Loading on-disk chunks failed", "err", err) if _, ok := errors.Cause(err).(*chunks.CorruptionErr); ok { h.metrics.mmapChunkCorruptionTotal.Inc() @@ -575,7 +629,7 @@ func (h *Head) Init(minValidTime int64) error { // If this fails, data will be recovered from WAL. // Hence we wont lose any data (given WAL is not corrupt). 
- mmappedChunks, err = h.removeCorruptedMmappedChunks(err) + mmappedChunks, oooMmappedChunks, lastMmapRef, err = h.removeCorruptedMmappedChunks(err) if err != nil { return err } @@ -618,7 +672,7 @@ func (h *Head) Init(minValidTime int64) error { // A corrupted checkpoint is a hard error for now and requires user // intervention. There's likely little data that can be recovered anyway. - if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks); err != nil { + if err := h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks); err != nil { return errors.Wrap(err, "backfill checkpoint") } h.updateWALReplayStatusRead(startFrom) @@ -651,7 +705,7 @@ func (h *Head) Init(minValidTime int64) error { if err != nil { return errors.Wrapf(err, "segment reader (offset=%d)", offset) } - err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks) + err = h.loadWAL(wal.NewReader(sr), multiRef, mmappedChunks, oooMmappedChunks) if err := sr.Close(); err != nil { level.Warn(h.logger).Log("msg", "Error while closing the wal segments reader", "err", err) } @@ -661,32 +715,94 @@ func (h *Head) Init(minValidTime int64) error { level.Info(h.logger).Log("msg", "WAL segment loaded", "segment", i, "maxSegment", endAt) h.updateWALReplayStatusRead(i) } + walReplayDuration := time.Since(walReplayStart) + + wblReplayStart := time.Now() + if h.wbl != nil { + // Replay OOO WAL. + startFrom, endAt, e = wal.Segments(h.wbl.Dir()) + if e != nil { + return errors.Wrap(e, "finding OOO WAL segments") + } + h.startWALReplayStatus(startFrom, endAt) + + for i := startFrom; i <= endAt; i++ { + s, err := wal.OpenReadSegment(wal.SegmentName(h.wbl.Dir(), i)) + if err != nil { + return errors.Wrap(err, fmt.Sprintf("open WBL segment: %d", i)) + } - walReplayDuration := time.Since(start) - h.metrics.walTotalReplayDuration.Set(walReplayDuration.Seconds()) + sr := wal.NewSegmentBufReader(s) + err = h.loadWbl(wal.NewReader(sr), multiRef, lastMmapRef) + if err := sr.Close(); err != nil { + level.Warn(h.logger).Log("msg", "Error while closing the wbl segments reader", "err", err) + } + if err != nil { + return err + } + level.Info(h.logger).Log("msg", "WBL segment loaded", "segment", i, "maxSegment", endAt) + h.updateWALReplayStatusRead(i) + } + } + + wblReplayDuration := time.Since(wblReplayStart) + + totalReplayDuration := time.Since(start) + h.metrics.dataTotalReplayDuration.Set(totalReplayDuration.Seconds()) level.Info(h.logger).Log( "msg", "WAL replay completed", "checkpoint_replay_duration", checkpointReplayDuration.String(), - "wal_replay_duration", time.Since(walReplayStart).String(), - "total_replay_duration", walReplayDuration.String(), + "wal_replay_duration", walReplayDuration.String(), + "wbl_replay_duration", wblReplayDuration.String(), + "total_replay_duration", totalReplayDuration.String(), ) return nil } -func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) (map[chunks.HeadSeriesRef][]*mmappedChunk, error) { +func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { mmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} + oooMmappedChunks := map[chunks.HeadSeriesRef][]*mmappedChunk{} + var lastRef, secondLastRef chunks.ChunkDiskMapperRef if err := h.chunkDiskMapper.IterateAllChunks(func(seriesRef chunks.HeadSeriesRef, chunkRef chunks.ChunkDiskMapperRef, mint, maxt int64, numSamples uint16, encoding chunkenc.Encoding) error { - if maxt 
< h.minValidTime.Load() { + secondLastRef = lastRef + lastRef = chunkRef + isOOO := chunkenc.IsOutOfOrderChunk(encoding) + if !isOOO && maxt < h.minValidTime.Load() { return nil } // We ignore any chunk that doesnt have a valid encoding - if encoding != chunkenc.EncXOR { + if !chunkenc.IsValidEncoding(encoding) { return nil } ms, ok := refSeries[seriesRef] + + if isOOO { + if !ok { + oooMmappedChunks[seriesRef] = append(oooMmappedChunks[seriesRef], &mmappedChunk{ + ref: chunkRef, + minTime: mint, + maxTime: maxt, + numSamples: numSamples, + }) + return nil + } + + h.metrics.chunks.Inc() + h.metrics.chunksCreated.Inc() + + ms.oooMmappedChunks = append(ms.oooMmappedChunks, &mmappedChunk{ + ref: chunkRef, + minTime: mint, + maxTime: maxt, + numSamples: numSamples, + }) + + return nil + } + if !ok { slice := mmappedChunks[seriesRef] if len(slice) > 0 && slice[len(slice)-1].maxTime >= mint { @@ -727,17 +843,19 @@ func (h *Head) loadMmappedChunks(refSeries map[chunks.HeadSeriesRef]*memSeries) } return nil }); err != nil { - return nil, errors.Wrap(err, "iterate on on-disk chunks") + // secondLastRef because the lastRef caused an error. + return nil, nil, secondLastRef, errors.Wrap(err, "iterate on on-disk chunks") } - return mmappedChunks, nil + return mmappedChunks, oooMmappedChunks, lastRef, nil } // removeCorruptedMmappedChunks attempts to delete the corrupted mmapped chunks and if it fails, it clears all the previously // loaded mmapped chunks. -func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, error) { +func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef][]*mmappedChunk, map[chunks.HeadSeriesRef][]*mmappedChunk, chunks.ChunkDiskMapperRef, error) { + level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") // We never want to preserve the in-memory series from snapshots if we are repairing m-map chunks. 
if err := h.resetInMemoryState(); err != nil { - return nil, err + return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, err } level.Info(h.logger).Log("msg", "Deleting mmapped chunk files") @@ -747,11 +865,11 @@ func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef if err := h.chunkDiskMapper.Truncate(math.MaxInt64); err != nil { level.Error(h.logger).Log("msg", "Deletion of all mmap chunk files failed", "err", err) } - return map[chunks.HeadSeriesRef][]*mmappedChunk{}, nil + return map[chunks.HeadSeriesRef][]*mmappedChunk{}, map[chunks.HeadSeriesRef][]*mmappedChunk{}, 0, nil } level.Info(h.logger).Log("msg", "Deletion of mmap chunk files successful, reattempting m-mapping the on-disk chunks") - mmappedChunks, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) + mmappedChunks, oooMmappedChunks, lastRef, err := h.loadMmappedChunks(make(map[chunks.HeadSeriesRef]*memSeries)) if err != nil { level.Error(h.logger).Log("msg", "Loading on-disk chunks failed, discarding chunk files completely", "err", err) if err := h.chunkDiskMapper.Truncate(math.MaxInt64); err != nil { @@ -760,12 +878,22 @@ func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef mmappedChunks = map[chunks.HeadSeriesRef][]*mmappedChunk{} } - return mmappedChunks, nil + return mmappedChunks, oooMmappedChunks, lastRef, nil } -func (h *Head) ApplyConfig(cfg *config.Config) error { +func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) { + oooAllowance := int64(0) + if cfg.StorageConfig.TSDBConfig != nil { + oooAllowance = cfg.StorageConfig.TSDBConfig.OutOfOrderAllowance + } + if oooAllowance < 0 { + oooAllowance = 0 + } + + h.SetOutOfOrderAllowance(oooAllowance, wbl) + if !h.opts.EnableExemplarStorage { - return nil + return } // Head uses opts.MaxExemplars in combination with opts.EnableExemplarStorage @@ -776,12 +904,21 @@ func (h *Head) ApplyConfig(cfg *config.Config) error { newSize := h.opts.MaxExemplars.Load() if prevSize == newSize { - return nil + return } migrated := h.exemplars.(*CircularExemplarStorage).Resize(newSize) level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) - return nil +} + +// SetOutOfOrderAllowance updates the out of order related parameters. +// If the Head already has a WBL set, then the wbl will be ignored. +func (h *Head) SetOutOfOrderAllowance(oooAllowance int64, wbl *wal.WAL) { + if oooAllowance > 0 && h.wbl == nil { + h.wbl = wbl + } + + h.opts.OutOfOrderAllowance.Store(oooAllowance) } // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. @@ -823,6 +960,27 @@ func (h *Head) updateMinMaxTime(mint, maxt int64) { } } +func (h *Head) updateMinOOOMaxOOOTime(mint, maxt int64) { + for { + lt := h.MinOOOTime() + if mint >= lt { + break + } + if h.minOOOTime.CAS(lt, mint) { + break + } + } + for { + ht := h.MaxOOOTime() + if maxt <= ht { + break + } + if h.maxOOOTime.CAS(ht, maxt) { + break + } + } +} + // SetMinValidTime sets the minimum timestamp the head can ingest. 
func (h *Head) SetMinValidTime(minValidTime int64) { h.minValidTime.Store(minValidTime) @@ -890,7 +1048,7 @@ func (h *Head) truncateMemory(mint int64) (err error) { h.metrics.headTruncateTotal.Inc() start := time.Now() - actualMint := h.gc() + actualMint, minMmapFile := h.gc() level.Info(h.logger).Log("msg", "Head GC completed", "duration", time.Since(start)) h.metrics.gcDuration.Observe(time.Since(start).Seconds()) if actualMint > h.minTime.Load() { @@ -908,8 +1066,8 @@ func (h *Head) truncateMemory(mint int64) (err error) { } // Truncate the chunk m-mapper. - if err := h.chunkDiskMapper.Truncate(mint); err != nil { - return errors.Wrap(err, "truncate chunks.HeadReadWriter") + if err := h.chunkDiskMapper.Truncate(minMmapFile); err != nil { + return errors.Wrap(err, "truncate chunks.HeadReadWriter by file number") } return nil } @@ -1000,7 +1158,7 @@ func (h *Head) truncateWAL(mint int64) error { } // Start a new segment, so low ingestion volume TSDB don't have more WAL than // needed. - if err := h.wal.NextSegment(); err != nil { + if _, err := h.wal.NextSegment(); err != nil { return errors.Wrap(err, "next segment") } last-- // Never consider last segment for checkpoint. @@ -1066,6 +1224,41 @@ func (h *Head) truncateWAL(mint int64) error { return nil } +// truncateOOO +// * truncates the OOO WBL files whose index is strictly less than lastWBLFile +// * garbage collects all the m-map chunks from the memory that are less than or equal to minOOOMmapRef +// and then deletes the series that do not have any data anymore. +func (h *Head) truncateOOO(lastWBLFile int, minOOOMmapRef chunks.ChunkDiskMapperRef) error { + curMinOOOMmapRef := chunks.ChunkDiskMapperRef(h.minOOOMmapRef.Load()) + if minOOOMmapRef.GreaterThan(curMinOOOMmapRef) { + h.minOOOMmapRef.Store(uint64(minOOOMmapRef)) + start := time.Now() + actualMint, minMmapFile := h.gc() + level.Info(h.logger).Log("msg", "Head GC completed in truncateOOO", "duration", time.Since(start)) + h.metrics.gcDuration.Observe(time.Since(start).Seconds()) + if actualMint > h.minTime.Load() { + // The actual mint of the Head is higher than the one asked to truncate. + appendableMinValidTime := h.appendableMinValidTime() + if actualMint < appendableMinValidTime { + h.minTime.Store(actualMint) + h.minValidTime.Store(actualMint) + } else { + // The actual min time is in the appendable window. + // So we set the mint to the appendableMinValidTime. + h.minTime.Store(appendableMinValidTime) + h.minValidTime.Store(appendableMinValidTime) + } + } + + // Truncate the chunk m-mapper. + if err := h.chunkDiskMapper.Truncate(minMmapFile); err != nil { + return errors.Wrap(err, "truncate chunks.HeadReadWriter by file number in truncateOOO") + } + } + + return h.wbl.Truncate(lastWBLFile) +} + type Stats struct { NumSeries uint64 MinTime, MaxTime int64 @@ -1195,14 +1388,19 @@ func (h *Head) Delete(mint, maxt int64, ms ...*labels.Matcher) error { } // gc removes data before the minimum timestamp from the head. -// It returns the actual min times of the chunks present in the Head. -func (h *Head) gc() int64 { +// It returns +// * The actual min times of the chunks present in the Head. +// * Min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. +func (h *Head) gc() (int64, int) { // Only data strictly lower than this timestamp must be deleted. mint := h.MinTime() + // Only ooo m-map chunks strictly lower than or equal to this ref + // must be deleted. 
+ minOOOMmapRef := chunks.ChunkDiskMapperRef(h.minOOOMmapRef.Load()) // Drop old chunks and remember series IDs and hashes if they can be // deleted entirely. - deleted, chunksRemoved, actualMint := h.series.gc(mint) + deleted, chunksRemoved, actualMint, minMmapFile := h.series.gc(mint, minOOOMmapRef) seriesRemoved := len(deleted) h.metrics.seriesRemoved.Add(float64(seriesRemoved)) @@ -1232,7 +1430,7 @@ func (h *Head) gc() int64 { h.deletedMtx.Unlock() } - return actualMint + return actualMint, minMmapFile } // Tombstones returns a new reader over the head's tombstones @@ -1270,6 +1468,18 @@ func (h *Head) MaxTime() int64 { return h.maxTime.Load() } +// MinOOOTime returns the lowest time bound on visible data in the out of order +// head. +func (h *Head) MinOOOTime() int64 { + return h.minOOOTime.Load() +} + +// MaxOOOTime returns the highest timestamp on visible data in the out of order +// head. +func (h *Head) MaxOOOTime() int64 { + return h.maxOOOTime.Load() +} + // compactable returns whether the head has a compactable range. // The head has a compactable range when the head time range is 1.5 times the chunk range. // The 0.5 acts as a buffer of the appendable window. @@ -1287,6 +1497,9 @@ func (h *Head) Close() error { if h.wal != nil { errs.Add(h.wal.Close()) } + if h.wbl != nil { + errs.Add(h.wbl.Close()) + } if errs.Err() == nil && h.opts.EnableMemorySnapshotOnShutdown { errs.Add(h.performChunkSnapshot()) } @@ -1317,7 +1530,9 @@ func (h *Head) getOrCreate(hash uint64, lset labels.Labels) (*memSeries, bool, e func (h *Head) getOrCreateWithID(id chunks.HeadSeriesRef, hash uint64, lset labels.Labels) (*memSeries, bool, error) { s, created, err := h.series.getOrSet(hash, lset, func() *memSeries { - return newMemSeries(lset, id, hash, h.chunkRange.Load(), h.opts.ChunkEndTimeVariance, &h.memChunkPool, h.opts.IsolationDisabled) + return newMemSeries(lset, id, hash, h.chunkRange.Load(), + h.opts.OutOfOrderCapMin.Load(), h.opts.OutOfOrderCapMax.Load(), + h.opts.ChunkEndTimeVariance, &h.memChunkPool, h.opts.IsolationDisabled) }) if err != nil { return nil, false, err @@ -1379,7 +1594,7 @@ const ( ) // stripeSeries holds series by HeadSeriesRef ("ID") and also by hash of their labels. -// ID-based lookups via (getByID()) are preferred over getByHash() for performance reasons. +// ID-based lookups via getByID() are preferred over getByHash() for performance reasons. // It locks modulo ranges of IDs and hashes to reduce lock contention. // The locks are padded to not be on the same cache line. Filling the padded space // with the maps was profiled to be slower – likely due to the additional pointer @@ -1421,13 +1636,15 @@ func newStripeSeries(stripeSize int, seriesCallback SeriesLifecycleCallback) *st // note: returning map[chunks.HeadSeriesRef]struct{} would be more accurate, // but the returned map goes into postings.Delete() which expects a map[storage.SeriesRef]struct // and there's no easy way to cast maps. -func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int64) { +// minMmapFile is the min mmap file number seen in the series (in-order and out-of-order) after gc'ing the series. +func (s *stripeSeries) gc(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) (_ map[storage.SeriesRef]struct{}, _ int, _ int64, minMmapFile int) { var ( deleted = map[storage.SeriesRef]struct{}{} deletedForCallback = []labels.Labels{} rmChunks = 0 actualMint int64 = math.MaxInt64 ) + minMmapFile = math.MaxInt32 // Run through all series and truncate old chunks. 
Mark those with no // chunks left as deleted and store their ID. for i := 0; i < s.size; i++ { @@ -1436,9 +1653,22 @@ func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int6 for hash, all := range s.hashes[i] { for _, series := range all { series.Lock() - rmChunks += series.truncateChunksBefore(mint) + rmChunks += series.truncateChunksBefore(mint, minOOOMmapRef) - if len(series.mmappedChunks) > 0 || series.headChunk != nil || series.pendingCommit { + if len(series.mmappedChunks) > 0 { + seq, _ := series.mmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + } + if len(series.oooMmappedChunks) > 0 { + seq, _ := series.oooMmappedChunks[0].ref.Unpack() + if seq < minMmapFile { + minMmapFile = seq + } + } + if len(series.mmappedChunks) > 0 || len(series.oooMmappedChunks) > 0 || + series.headChunk != nil || series.oooHeadChunk != nil || series.pendingCommit { seriesMint := series.minTime() if seriesMint < actualMint { actualMint = seriesMint @@ -1481,7 +1711,7 @@ func (s *stripeSeries) gc(mint int64) (map[storage.SeriesRef]struct{}, int, int6 actualMint = mint } - return deleted, rmChunks, actualMint + return deleted, rmChunks, actualMint, minMmapFile } func (s *stripeSeries) getByID(id chunks.HeadSeriesRef) *memSeries { @@ -1574,11 +1804,18 @@ type memSeries struct { // // pN is the pointer to the mmappedChunk referered to by HeadChunkID=N mmappedChunks []*mmappedChunk - mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. - headChunk *memChunk // Most recent chunk in memory that's still being built. - chunkRange int64 + headChunk *memChunk // Most recent chunk in memory that's still being built. firstChunkID chunks.HeadChunkID // HeadChunkID for mmappedChunks[0] + oooMmappedChunks []*mmappedChunk // Immutable chunks on disk containing OOO samples. + oooHeadChunk *oooHeadChunk // Most recent chunk for ooo samples in memory that's still being built. + firstOOOChunkID chunks.HeadChunkID // HeadOOOChunkID for oooMmappedChunks[0] + + mmMaxTime int64 // Max time of any mmapped chunk, only used during WAL replay. + chunkRange int64 + oooCapMin uint8 + oooCapMax uint8 + // chunkEndTimeVariance is how much variance (between 0 and 1) should be applied to the chunk end time, // to spread chunks writing across time. Doesn't apply to the last chunk of the chunk range. 0 to disable variance. chunkEndTimeVariance float64 @@ -1602,7 +1839,7 @@ type memSeries struct { txs *txRing } -func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, hash uint64, chunkRange int64, chunkEndTimeVariance float64, memChunkPool *sync.Pool, isolationDisabled bool) *memSeries { +func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, hash uint64, chunkRange, oooCapMin, oooCapMax int64, chunkEndTimeVariance float64, memChunkPool *sync.Pool, isolationDisabled bool) *memSeries { s := &memSeries{ lset: lset, hash: hash, @@ -1611,6 +1848,8 @@ func newMemSeries(lset labels.Labels, id chunks.HeadSeriesRef, hash uint64, chun chunkEndTimeVariance: chunkEndTimeVariance, nextAt: math.MinInt64, memChunkPool: memChunkPool, + oooCapMin: uint8(oooCapMin), + oooCapMax: uint8(oooCapMax), } if !isolationDisabled { s.txs = newTxRing(4) @@ -1629,6 +1868,7 @@ func (s *memSeries) minTime() int64 { } func (s *memSeries) maxTime() int64 { + // The highest timestamps will always be in the regular (non-OOO) chunks, even if OOO is enabled. 
c := s.head() if c != nil { return c.maxTime @@ -1642,26 +1882,39 @@ func (s *memSeries) maxTime() int64 { // truncateChunksBefore removes all chunks from the series that // have no timestamp at or after mint. // Chunk IDs remain unchanged. -func (s *memSeries) truncateChunksBefore(mint int64) (removed int) { +func (s *memSeries) truncateChunksBefore(mint int64, minOOOMmapRef chunks.ChunkDiskMapperRef) int { + var removedInOrder int if s.headChunk != nil && s.headChunk.maxTime < mint { // If head chunk is truncated, we can truncate all mmapped chunks. - removed = 1 + len(s.mmappedChunks) - s.firstChunkID += chunks.HeadChunkID(removed) + removedInOrder = 1 + len(s.mmappedChunks) + s.firstChunkID += chunks.HeadChunkID(removedInOrder) s.headChunk = nil s.mmappedChunks = nil - return removed } if len(s.mmappedChunks) > 0 { for i, c := range s.mmappedChunks { if c.maxTime >= mint { break } - removed = i + 1 + removedInOrder = i + 1 } - s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removed:]...) - s.firstChunkID += chunks.HeadChunkID(removed) + s.mmappedChunks = append(s.mmappedChunks[:0], s.mmappedChunks[removedInOrder:]...) + s.firstChunkID += chunks.HeadChunkID(removedInOrder) } - return removed + + var removedOOO int + if len(s.oooMmappedChunks) > 0 { + for i, c := range s.oooMmappedChunks { + if c.ref.GreaterThan(minOOOMmapRef) { + break + } + removedOOO = i + 1 + } + s.oooMmappedChunks = append(s.oooMmappedChunks[:0], s.oooMmappedChunks[removedOOO:]...) + s.firstOOOChunkID += chunks.HeadChunkID(removedOOO) + } + + return removedInOrder + removedOOO } // cleanupAppendIDsBelow cleans up older appendIDs. Has to be called after @@ -1681,6 +1934,16 @@ type memChunk struct { minTime, maxTime int64 } +type oooHeadChunk struct { + chunk *chunkenc.OOOChunk + minTime, maxTime int64 // can probably be removed and pulled out of the chunk instead +} + +// OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt]. +func (mc *oooHeadChunk) OverlapsClosedInterval(mint, maxt int64) bool { + return overlapsClosedInterval(mc.minTime, mc.maxTime, mint, maxt) +} + // OverlapsClosedInterval returns true if the chunk overlaps [mint, maxt]. func (mc *memChunk) OverlapsClosedInterval(mint, maxt int64) bool { return overlapsClosedInterval(mc.minTime, mc.maxTime, mint, maxt) @@ -1709,12 +1972,15 @@ func (noopSeriesLifecycleCallback) PostCreation(labels.Labels) {} func (noopSeriesLifecycleCallback) PostDeletion(...labels.Labels) {} func (h *Head) Size() int64 { - var walSize int64 + var walSize, wblSize int64 if h.wal != nil { walSize, _ = h.wal.Size() } + if h.wbl != nil { + wblSize, _ = h.wbl.Size() + } cdmSize, _ := h.chunkDiskMapper.Size() - return walSize + cdmSize + return walSize + wblSize + cdmSize } func (h *RangeHead) Size() int64 { diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index 0039896329..b16cb201d7 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -127,6 +127,7 @@ func (h *Head) appender() *headAppender { minValidTime: h.appendableMinValidTime(), mint: math.MaxInt64, maxt: math.MinInt64, + headMaxt: h.MaxTime(), samples: h.getAppendBuffer(), sampleSeries: h.getSeriesBuffer(), exemplars: exemplarsBuf, @@ -238,6 +239,7 @@ type headAppender struct { head *Head minValidTime int64 // No samples below this timestamp are allowed. 
mint, maxt int64 + headMaxt int64 // We track it here to not take the lock for every sample appended. series []record.RefSeries // New series held by this appender. samples []record.RefSample // New samples held by this appender. @@ -249,7 +251,10 @@ type headAppender struct { } func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { - if t < a.minValidTime { + // For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append. + // If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work. + oooAllowance := a.head.opts.OutOfOrderAllowance.Load() + if oooAllowance == 0 && t < a.minValidTime { a.head.metrics.outOfBoundSamples.Inc() return 0, storage.ErrOutOfBounds } @@ -281,16 +286,25 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 } s.Lock() - if delta, err := s.appendable(t, v); err != nil { - s.Unlock() + // TODO: if we definitely know at this point that the sample is ooo, then optimise + // to skip that sample from the WAL and write only in the WBL. + _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, oooAllowance) + if err == nil { + s.pendingCommit = true + } + s.Unlock() + if delta > 0 { + a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) + } + if err != nil { if err == storage.ErrOutOfOrderSample { a.head.metrics.outOfOrderSamples.Inc() - a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) + } + if err == storage.ErrTooOldSample { + a.head.metrics.tooOldSamples.Inc() } return 0, err } - s.pendingCommit = true - s.Unlock() if t < a.mint { a.mint = t @@ -308,24 +322,53 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 return storage.SeriesRef(s.ref), nil } -// appendable checks whether the given sample is valid for appending to the series. -func (s *memSeries) appendable(t int64, v float64) (int64, error) { - c := s.head() - if c == nil { - return 0, nil +// appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) +// The sample belongs to the out of order chunk if we return true and no error. +// An error signifies the sample cannot be handled. +func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooAllowance int64) (isOutOfOrder bool, delta int64, err error) { + msMaxt := s.maxTime() + if msMaxt == math.MinInt64 { + // The series has no sample and was freshly created. + if t >= minValidTime { + // We can append it in the in-order chunk. + return false, 0, nil + } + + // We cannot append it in the in-order head. So we check the oooAllowance + // w.r.t. the head's maxt. + // -1 because for the first sample in the Head, headMaxt will be equal to t. + msMaxt = headMaxt - 1 } - if t > c.maxTime { - return 0, nil + + if t > msMaxt { + return false, 0, nil } - if t < c.maxTime { - return c.maxTime - t, storage.ErrOutOfOrderSample + + if t < msMaxt-oooAllowance { + if oooAllowance > 0 { + return true, msMaxt - t, storage.ErrTooOldSample + } + if t < minValidTime { + return false, msMaxt - t, storage.ErrOutOfBounds + } + return false, msMaxt - t, storage.ErrOutOfOrderSample } + + if t != msMaxt || s.head() == nil { + // Sample is ooo and within allowance OR series has no active chunk to check for duplicate sample. 
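// Illustration (not part of the vendored change): with an OutOfOrderAllowance of 1h
// and the series' newest sample at 12:00, a new sample at 12:30 takes the in-order
// path above, a sample at 11:30 reaches this point and is inserted out of order, and
// a sample at 10:30 is rejected with storage.ErrTooOldSample. With the allowance set
// to 0, late samples keep failing with storage.ErrOutOfOrderSample or
// storage.ErrOutOfBounds as before.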
+ return true, msMaxt - t, nil + } + // We are allowing exact duplicates as we can encounter them in valid cases // like federation and erroring out at that time would be extremely noisy. + // this only checks against the latest in-order sample. + // the OOO headchunk has its own method to detect these duplicates if math.Float64bits(s.sampleBuf[3].v) != math.Float64bits(v) { - return 0, storage.ErrDuplicateSampleForTimestamp + return false, 0, storage.ErrDuplicateSampleForTimestamp } - return 0, nil + + // sample is identical (ts + value) with most current (highest ts) sample in sampleBuf + return false, 0, nil } // AppendExemplar for headAppender assumes the series ref already exists, and so it doesn't @@ -458,33 +501,201 @@ func (a *headAppender) Commit() (err error) { defer a.head.putExemplarBuffer(a.exemplars) defer a.head.iso.closeAppend(a.appendID) - total := len(a.samples) - var series *memSeries + var ( + samplesAppended = len(a.samples) + oooAccepted int // number of samples out of order but accepted: with ooo enabled and within allowance + oooRejected int // number of samples rejected due to: out of order but OOO support disabled. + tooOldRejected int // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside allowance) + oobRejected int // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) + inOrderMint int64 = math.MaxInt64 + inOrderMaxt int64 = math.MinInt64 + ooomint int64 = math.MaxInt64 + ooomaxt int64 = math.MinInt64 + wblSamples []record.RefSample + oooMmapMarkers map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef + oooRecords [][]byte + series *memSeries + enc record.Encoder + ) + defer func() { + for i := range oooRecords { + a.head.putBytesBuffer(oooRecords[i][:0]) + } + }() + collectOOORecords := func() { + if a.head.wbl == nil { + // WBL is not enabled. So no need to collect. + wblSamples = nil + oooMmapMarkers = nil + return + } + // The m-map happens before adding a new sample. So we collect + // the m-map markers first, and then samples. 
+ // WBL Graphically: + // WBL Before this Commit(): [old samples before this commit for chunk 1] + // WBL After this Commit(): [old samples before this commit for chunk 1][new samples in this commit for chunk 1]mmapmarker1[samples for chunk 2]mmapmarker2[samples for chunk 3] + if oooMmapMarkers != nil { + markers := make([]record.RefMmapMarker, 0, len(oooMmapMarkers)) + for ref, mmapRef := range oooMmapMarkers { + markers = append(markers, record.RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) + } + r := enc.MmapMarkers(markers, a.head.getBytesBuffer()) + oooRecords = append(oooRecords, r) + } + + if len(wblSamples) > 0 { + r := enc.Samples(wblSamples, a.head.getBytesBuffer()) + oooRecords = append(oooRecords, r) + } + + wblSamples = nil + oooMmapMarkers = nil + } + oooAllowance := a.head.opts.OutOfOrderAllowance.Load() for i, s := range a.samples { series = a.sampleSeries[i] series.Lock() - delta, ok, chunkCreated := series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper) - series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) - series.pendingCommit = false - series.Unlock() - if !ok { - total-- + oooSample, delta, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, oooAllowance) + switch err { + case storage.ErrOutOfOrderSample: + samplesAppended-- + oooRejected++ + case storage.ErrOutOfBounds: + samplesAppended-- + oobRejected++ + case storage.ErrTooOldSample: + samplesAppended-- + tooOldRejected++ + case nil: + // Do nothing. + default: + samplesAppended-- + } + + var ok, chunkCreated bool + + if err == nil && oooSample { + // Sample is OOO and OOO handling is enabled + // and the delta is within the OOO tolerance. + var mmapRef chunks.ChunkDiskMapperRef + ok, chunkCreated, mmapRef = series.insert(s.T, s.V, a.head.chunkDiskMapper) + if chunkCreated { + r, ok := oooMmapMarkers[series.ref] + if !ok || r != 0 { + // !ok means there are no markers collected for these samples yet. So we first flush the samples + // before setting this m-map marker. + + // r != 0 means we have already m-mapped a chunk for this series in the same Commit(). + // Hence, before we m-map again, we should add the samples and m-map markers + // seen till now to the WBL records. + collectOOORecords() + } + + if oooMmapMarkers == nil { + oooMmapMarkers = make(map[chunks.HeadSeriesRef]chunks.ChunkDiskMapperRef) + } + oooMmapMarkers[series.ref] = mmapRef + } + if ok { + wblSamples = append(wblSamples, s) + if s.T < ooomint { + ooomint = s.T + } + if s.T > ooomaxt { + ooomaxt = s.T + } + oooAccepted++ + } else { + // exact duplicate of last sample. + // the sample was an attempted update. + // note that we can only detect updates if they clash with a sample in the OOOHeadChunk, + // not with samples in already flushed OOO chunks. + // TODO: error reporting? depends on addressing https://github.com/prometheus/prometheus/discussions/10305 + samplesAppended-- + } + } else if err == nil { + // if we're here, either of these is true: + // - the sample.t is beyond any previously ingested timestamp + // - the sample is an exact duplicate of the 'head sample' + + delta, ok, chunkCreated = series.append(s.T, s.V, a.appendID, a.head.chunkDiskMapper) + + // TODO: handle overwrite. 
+ // this would be storage.ErrDuplicateSampleForTimestamp, it has no attached counter + // in case of identical timestamp and value, we should drop silently + if ok { + // sample timestamp is beyond any previously ingested timestamp + if s.T < inOrderMint { // TODO(ganesh): dieter thinks this never applies and can be removed because we know we're in order. + inOrderMint = s.T + } + if s.T > inOrderMaxt { + inOrderMaxt = s.T + } + } else { + // ... therefore, in this case, we know the sample is an exact duplicate, and should be silently dropped. + samplesAppended-- + } + } + + if delta > 0 { a.head.metrics.oooHistogram.Observe(float64(delta) / 1000) - a.head.metrics.outOfOrderSamples.Inc() } if chunkCreated { a.head.metrics.chunks.Inc() a.head.metrics.chunksCreated.Inc() } - } - a.head.metrics.samplesAppended.Add(float64(total)) - a.head.updateMinMaxTime(a.mint, a.maxt) + series.cleanupAppendIDsBelow(a.cleanupAppendIDsBelow) + series.pendingCommit = false + series.Unlock() + } + a.head.metrics.outOfOrderSamples.Add(float64(oooRejected)) + a.head.metrics.outOfBoundSamples.Add(float64(oobRejected)) + a.head.metrics.tooOldSamples.Add(float64(tooOldRejected)) + a.head.metrics.samplesAppended.Add(float64(samplesAppended)) + a.head.metrics.outOfOrderSamplesAppended.Add(float64(oooAccepted)) + a.head.updateMinMaxTime(inOrderMint, inOrderMaxt) + a.head.updateMinOOOMaxOOOTime(ooomint, ooomaxt) + + // TODO: currently WBL logging of ooo samples is best effort here since we cannot try logging + // until we have found what samples become OOO. We can try having a metric for this failure. + // Returning the error here is not correct because we have already put the samples into the memory, + // hence the append/insert was a success. + collectOOORecords() + if a.head.wbl != nil { + if err := a.head.wbl.Log(oooRecords...); err != nil { + level.Error(a.head.logger).Log("msg", "Failed to log out of order samples into the WAL", "err", err) + } + } return nil } +// insert is like append, except it inserts. used for Out Of Order samples. +func (s *memSeries) insert(t int64, v float64, chunkDiskMapper chunkDiskMapper) (inserted, chunkCreated bool, mmapRef chunks.ChunkDiskMapperRef) { + c := s.oooHeadChunk + if c == nil || c.chunk.NumSamples() == int(s.oooCapMax) { + // Note: If no new samples come in then we rely on compaction to clean up stale in-memory OOO chunks. + c, mmapRef = s.cutNewOOOHeadChunk(t, chunkDiskMapper) + chunkCreated = true + } + + ok := c.chunk.Insert(t, v) + if ok { + if chunkCreated || t < c.minTime { + c.minTime = t + } + if chunkCreated || t > c.maxTime { + c.maxTime = t + } + } + return ok, chunkCreated, mmapRef +} + // append adds the sample (t, v) to the series. The caller also has to provide // the appendID for isolation. (The appendID can be zero, which results in no // isolation for this append.) @@ -502,7 +713,7 @@ func (s *memSeries) append(t int64, v float64, appendID uint64, chunkDiskMapper // Out of order sample. Sample timestamp is already in the mmapped chunks, so ignore it. return s.mmappedChunks[len(s.mmappedChunks)-1].maxTime - t, false, false } - // There is no chunk in this series yet, create the first chunk for the sample. + // There is no head chunk in this series yet, create the first chunk for the sample. 
c = s.cutNewHeadChunk(t, chunkDiskMapper) chunkCreated = true } @@ -613,6 +824,36 @@ func (s *memSeries) cutNewHeadChunk(mint int64, chunkDiskMapper chunkDiskMapper) return s.headChunk } +func (s *memSeries) cutNewOOOHeadChunk(mint int64, chunkDiskMapper chunkDiskMapper) (*oooHeadChunk, chunks.ChunkDiskMapperRef) { + ref := s.mmapCurrentOOOHeadChunk(chunkDiskMapper) + + s.oooHeadChunk = &oooHeadChunk{ + chunk: chunkenc.NewOOOChunk(int(s.oooCapMin)), + minTime: mint, + maxTime: math.MinInt64, + } + + return s.oooHeadChunk, ref +} + +func (s *memSeries) mmapCurrentOOOHeadChunk(chunkDiskMapper chunkDiskMapper) chunks.ChunkDiskMapperRef { + if s.oooHeadChunk == nil { + // There is no head chunk, so nothing to m-map here. + return 0 + } + xor, _ := s.oooHeadChunk.chunk.ToXor() // encode to XorChunk which is more compact and implements all of the needed functionality to be encoded + oooXor := &chunkenc.OOOXORChunk{XORChunk: xor} + chunkRef := chunkDiskMapper.WriteChunk(s.ref, s.oooHeadChunk.minTime, s.oooHeadChunk.maxTime, oooXor, handleChunkWriteError) + s.oooMmappedChunks = append(s.oooMmappedChunks, &mmappedChunk{ + ref: chunkRef, + numSamples: uint16(xor.NumSamples()), + minTime: s.oooHeadChunk.minTime, + maxTime: s.oooHeadChunk.maxTime, + }) + s.oooHeadChunk = nil + return chunkRef +} + func (s *memSeries) mmapCurrentHeadChunk(chunkDiskMapper chunkDiskMapper) { if s.headChunk == nil { // There is no head chunk, so nothing to m-map here. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go index 0209a6a15c..cb953222ed 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_read.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_read.go @@ -212,11 +212,20 @@ func (h *headIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chk return nil } -// headChunkID returns the HeadChunkID corresponding to .mmappedChunks[pos] +// headChunkID returns the HeadChunkID referred to by the given position. +// * 0 <= pos < len(s.mmappedChunks) refer to s.mmappedChunks[pos] +// * pos == len(s.mmappedChunks) refers to s.headChunk func (s *memSeries) headChunkID(pos int) chunks.HeadChunkID { return chunks.HeadChunkID(pos) + s.firstChunkID } +// oooHeadChunkID returns the HeadChunkID referred to by the given position. +// * 0 <= pos < len(s.oooMmappedChunks) refer to s.oooMmappedChunks[pos] +// * pos == len(s.oooMmappedChunks) refers to s.oooHeadChunk +func (s *memSeries) oooHeadChunkID(pos int) chunks.HeadChunkID { + return chunks.HeadChunkID(pos) + s.firstOOOChunkID +} + // LabelValueFor returns label value for the given label name in the series referred to by ID. func (h *headIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { memSeries := h.head.series.getByID(chunks.HeadSeriesRef(id)) @@ -287,8 +296,8 @@ func (h *headChunkReader) Close() error { } // Chunk returns the chunk for the reference number. -func (h *headChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { - sid, cid := chunks.HeadChunkRef(ref).Unpack() +func (h *headChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { + sid, cid := chunks.HeadChunkRef(meta.Ref).Unpack() s := h.head.series.getByID(sid) // This means that the series has been garbage collected. 
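As an aside for readers following the chunk-ID bookkeeping, here is a minimal, self-contained sketch (separate from the vendored change, using hypothetical helper names) of the position-to-HeadChunkID arithmetic that headChunkID and oooHeadChunkID describe: a slice position plus firstChunkID / firstOOOChunkID, which record how many older chunks were already truncated, so chunk IDs handed out earlier remain stable.

package main

import "fmt"

// chunkIDFor mirrors the idea behind memSeries.headChunkID: slice position plus
// the ID of the oldest chunk still held in memory.
func chunkIDFor(pos, firstChunkID uint64) uint64 { return pos + firstChunkID }

// posFor inverts the mapping, as the chunk readers do when resolving a chunk ID
// back to a slice index.
func posFor(id, firstChunkID uint64) uint64 { return id - firstChunkID }

func main() {
	const firstChunkID = 7 // e.g. chunks 0..6 were truncated away earlier
	fmt.Println(chunkIDFor(0, firstChunkID)) // 7: oldest m-mapped chunk still in memory
	fmt.Println(chunkIDFor(3, firstChunkID)) // 10: with len(mmappedChunks) == 3, this is the head chunk
	fmt.Println(posFor(9, firstChunkID))     // 2: slice index that chunk ID 9 resolves to
}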
@@ -358,6 +367,258 @@ func (s *memSeries) chunk(id chunks.HeadChunkID, cdm chunkDiskMapper) (chunk *me return mc, true, nil } +// oooMergedChunk returns the requested chunk based on the given chunks.Meta +// reference from memory or by m-mapping it from the disk. The returned chunk +// might be a merge of all the overlapping chunks, if any, amongst all the +// chunks in the OOOHead. +// This function is not thread safe unless the caller holds a lock. +func (s *memSeries) oooMergedChunk(meta chunks.Meta, cdm chunkDiskMapper, mint, maxt int64) (chunk *mergedOOOChunks, err error) { + _, cid := chunks.HeadChunkRef(meta.Ref).Unpack() + + // ix represents the index of chunk in the s.mmappedChunks slice. The chunk meta's are + // incremented by 1 when new chunk is created, hence (meta - firstChunkID) gives the slice index. + // The max index for the s.mmappedChunks slice can be len(s.mmappedChunks)-1, hence if the ix + // is len(s.mmappedChunks), it represents the next chunk, which is the head chunk. + ix := int(cid) - int(s.firstOOOChunkID) + if ix < 0 || ix > len(s.oooMmappedChunks) { + return nil, storage.ErrNotFound + } + + if ix == len(s.oooMmappedChunks) { + if s.oooHeadChunk == nil { + return nil, errors.New("invalid ooo head chunk") + } + } + + // We create a temporary slice of chunk metas to hold the information of all + // possible chunks that may overlap with the requested chunk. + tmpChks := make([]chunkMetaAndChunkDiskMapperRef, 0, len(s.oooMmappedChunks)) + + oooHeadRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks)))) + if s.oooHeadChunk != nil && s.oooHeadChunk.OverlapsClosedInterval(mint, maxt) { + // We only want to append the head chunk if this chunk existed when + // Series() was called. This brings consistency in case new data + // is added in between Series() and Chunk() calls + if oooHeadRef == meta.OOOLastRef { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: chunks.Meta{ + MinTime: meta.OOOLastMinTime, // we want to ignore samples that were added before last known min time + MaxTime: meta.OOOLastMaxTime, // we want to ignore samples that were added after last known max time + Ref: oooHeadRef, + }, + }) + } + } + + for i, c := range s.oooMmappedChunks { + chunkRef := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) + // We can skip chunks that came in later than the last known OOOLastRef + if chunkRef > meta.OOOLastRef { + break + } + + if chunkRef == meta.OOOLastRef { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: chunks.Meta{ + MinTime: meta.OOOLastMinTime, + MaxTime: meta.OOOLastMaxTime, + Ref: chunkRef, + }, + ref: c.ref, + origMinT: c.minTime, + origMaxT: c.maxTime, + }) + } else if c.OverlapsClosedInterval(mint, maxt) { + tmpChks = append(tmpChks, chunkMetaAndChunkDiskMapperRef{ + meta: chunks.Meta{ + MinTime: c.minTime, + MaxTime: c.maxTime, + Ref: chunkRef, + }, + ref: c.ref, + }) + } + } + + // Next we want to sort all the collected chunks by min time so we can find + // those that overlap and stop when we know the rest don't. + sort.Sort(byMinTimeAndMinRef(tmpChks)) + + mc := &mergedOOOChunks{} + absoluteMax := int64(math.MinInt64) + for _, c := range tmpChks { + if c.meta.Ref == meta.Ref || len(mc.chunks) > 0 && c.meta.MinTime <= absoluteMax { + if c.meta.Ref == oooHeadRef { + var xor *chunkenc.XORChunk + // If head chunk min and max time match the meta OOO markers + // that means that the chunk has not expanded so we can append + // it as it is. 
+ if s.oooHeadChunk.minTime == meta.OOOLastMinTime && s.oooHeadChunk.maxTime == meta.OOOLastMaxTime { + xor, err = s.oooHeadChunk.chunk.ToXor() // TODO(jesus.vazquez) (This is an optimization idea that has no priority and might not be that useful) See if we could use a copy of the underlying slice. That would leave the more expensive ToXor() function only for the usecase where Bytes() is called. + } else { + // We need to remove samples that are outside of the markers + xor, err = s.oooHeadChunk.chunk.ToXorBetweenTimestamps(meta.OOOLastMinTime, meta.OOOLastMaxTime) + } + if err != nil { + return nil, errors.Wrap(err, "failed to convert ooo head chunk to xor chunk") + } + c.meta.Chunk = xor + } else { + chk, err := cdm.Chunk(c.ref) + if err != nil { + if _, ok := err.(*chunks.CorruptionErr); ok { + return nil, errors.Wrap(err, "invalid ooo mmapped chunk") + } + return nil, err + } + if c.meta.Ref == meta.OOOLastRef && + (c.origMinT != meta.OOOLastMinTime || c.origMaxT != meta.OOOLastMaxTime) { + // The head expanded and was memory mapped so now we need to + // wrap the chunk within a chunk that doesnt allows us to iterate + // through samples out of the OOOLastMinT and OOOLastMaxT + // markers. + c.meta.Chunk = boundedChunk{chk, meta.OOOLastMinTime, meta.OOOLastMaxTime} + } else { + c.meta.Chunk = chk + } + } + mc.chunks = append(mc.chunks, c.meta) + if c.meta.MaxTime > absoluteMax { + absoluteMax = c.meta.MaxTime + } + } + } + + return mc, nil +} + +var _ chunkenc.Chunk = &mergedOOOChunks{} + +// mergedOOOChunks holds the list of overlapping chunks. This struct satisfies +// chunkenc.Chunk. +type mergedOOOChunks struct { + chunks []chunks.Meta +} + +// Bytes is a very expensive method because its calling the iterator of all the +// chunks in the mergedOOOChunk and building a new chunk with the samples. 
+func (o mergedOOOChunks) Bytes() []byte { + xc := chunkenc.NewXORChunk() + app, err := xc.Appender() + if err != nil { + panic(err) + } + it := o.Iterator(nil) + for it.Next() { + t, v := it.At() + app.Append(t, v) + } + + return xc.Bytes() +} + +func (o mergedOOOChunks) Encoding() chunkenc.Encoding { + return chunkenc.EncXOR +} + +func (o mergedOOOChunks) Appender() (chunkenc.Appender, error) { + return nil, errors.New("can't append to mergedOOOChunks") +} + +func (o mergedOOOChunks) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { + iterators := make([]chunkenc.Iterator, 0, len(o.chunks)) + for _, c := range o.chunks { + iterators = append(iterators, c.Chunk.Iterator(nil)) + } + return storage.NewChainSampleIterator(iterators) +} + +func (o mergedOOOChunks) NumSamples() int { + samples := 0 + for _, c := range o.chunks { + samples += c.Chunk.NumSamples() + } + return samples +} + +func (o mergedOOOChunks) Compact() {} + +var _ chunkenc.Chunk = &boundedChunk{} + +// boundedChunk is an implementation of chunkenc.Chunk that uses a +// boundedIterator that only iterates through samples which timestamps are +// >= minT and <= maxT +type boundedChunk struct { + chunkenc.Chunk + minT int64 + maxT int64 +} + +func (b boundedChunk) Bytes() []byte { + xor := chunkenc.NewXORChunk() + a, _ := xor.Appender() + it := b.Iterator(nil) + for it.Next() { + t, v := it.At() + a.Append(t, v) + } + return xor.Bytes() +} + +func (b boundedChunk) Iterator(iterator chunkenc.Iterator) chunkenc.Iterator { + it := b.Chunk.Iterator(iterator) + if it == nil { + panic("iterator shouldn't be nil") + } + return boundedIterator{it, b.minT, b.maxT} +} + +var _ chunkenc.Iterator = &boundedIterator{} + +// boundedIterator is an implementation of Iterator that only iterates through +// samples which timestamps are >= minT and <= maxT +type boundedIterator struct { + chunkenc.Iterator + minT int64 + maxT int64 +} + +// Next the first time its called it will advance as many positions as necessary +// until its able to find a sample within the bounds minT and maxT. +// If there are samples within bounds it will advance one by one amongst them. +// If there are no samples within bounds it will return false. +func (b boundedIterator) Next() bool { + for b.Iterator.Next() { + t, _ := b.Iterator.At() + if t < b.minT { + continue + } else if t > b.maxT { + return false + } + return true + } + return false +} + +func (b boundedIterator) Seek(t int64) bool { + if t < b.minT { + // We must seek at least up to b.minT if it is asked for something before that. + ok := b.Iterator.Seek(b.minT) + if !ok { + return false + } + t, _ := b.Iterator.At() + return t <= b.maxT + } + if t > b.maxT { + // We seek anyway so that the subsequent Next() calls will also return false. 
+ b.Iterator.Seek(t) + return false + } + return b.Iterator.Seek(t) +} + +// safeChunk makes sure that the chunk can be accessed without a race condition type safeChunk struct { chunkenc.Chunk s *memSeries diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 414c9b679a..1a78f01791 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -41,7 +41,7 @@ import ( "github.com/prometheus/prometheus/tsdb/wal" ) -func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { +func (h *Head) loadWAL(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, mmappedChunks, oooMmappedChunks map[chunks.HeadSeriesRef][]*mmappedChunk) (err error) { // Track number of samples that referenced a series we don't know about // for error reporting. var unknownRefs atomic.Uint64 @@ -215,10 +215,11 @@ Outer: processors[idx].mx.Lock() mmc := mmappedChunks[walSeries.Ref] + oooMmc := oooMmappedChunks[walSeries.Ref] if created { // This is the first WAL series record for this series. - h.resetSeriesWithMMappedChunks(mSeries, mmc) + h.resetSeriesWithMMappedChunks(mSeries, mmc, oooMmc) processors[idx].mx.Unlock() continue } @@ -252,7 +253,7 @@ Outer: } // Replacing m-mapped chunks with the new ones (could be empty). - h.resetSeriesWithMMappedChunks(mSeries, mmc) + h.resetSeriesWithMMappedChunks(mSeries, mmc, oooMmc) processors[idx].mx.Unlock() } @@ -343,11 +344,12 @@ Outer: } // resetSeriesWithMMappedChunks is only used during the WAL replay. -func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc []*mmappedChunk) { - h.metrics.chunksCreated.Add(float64(len(mmc))) - h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks))) - h.metrics.chunks.Add(float64(len(mmc) - len(mSeries.mmappedChunks))) +func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*mmappedChunk) { + h.metrics.chunksCreated.Add(float64(len(mmc) + len(oooMmc))) + h.metrics.chunksRemoved.Add(float64(len(mSeries.mmappedChunks) + len(mSeries.oooMmappedChunks))) + h.metrics.chunks.Add(float64(len(mmc) + len(oooMmc) - len(mSeries.mmappedChunks) - len(mSeries.oooMmappedChunks))) mSeries.mmappedChunks = mmc + mSeries.oooMmappedChunks = oooMmc // Cache the last mmapped chunk time, so we can skip calling append() for samples it will reject. if len(mmc) == 0 { mSeries.mmMaxTime = math.MinInt64 @@ -357,6 +359,8 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc []*mmappedCh } // Any samples replayed till now would already be compacted. Resetting the head chunk. + // We do not reset oooHeadChunk because that is being replayed from a different WAL + // and has not been replayed here. mSeries.nextAt = 0 mSeries.headChunk = nil mSeries.app = nil @@ -446,6 +450,278 @@ func (wp *walSubsetProcessor) waitUntilIdle() { } } +func (h *Head) loadWbl(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { + // Track number of samples that referenced a series we don't know about + // for error reporting. + var unknownRefs atomic.Uint64 + + lastSeq, lastOff := lastMmapRef.Unpack() + // Start workers that each process samples for a partition of the series ID space. 
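// Note (descriptive, not part of the vendored change): the partition is by series
// reference, ref % n with n = runtime.GOMAXPROCS(0), the same sharding used by the
// in-order WAL replay, so all samples of a given series are handled by the same
// worker goroutine during WBL replay.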
+ var ( + wg sync.WaitGroup + n = runtime.GOMAXPROCS(0) + processors = make([]wblSubsetProcessor, n) + + dec record.Decoder + shards = make([][]record.RefSample, n) + + decoded = make(chan interface{}, 10) + decodeErr error + samplesPool = sync.Pool{ + New: func() interface{} { + return []record.RefSample{} + }, + } + markersPool = sync.Pool{ + New: func() interface{} { + return []record.RefMmapMarker{} + }, + } + ) + + defer func() { + // For CorruptionErr ensure to terminate all workers before exiting. + // We also wrap it to identify OOO WBL corruption. + _, ok := err.(*wal.CorruptionErr) + if ok { + err = &errLoadWbl{err: err} + for i := 0; i < n; i++ { + processors[i].closeAndDrain() + } + wg.Wait() + } + }() + + wg.Add(n) + for i := 0; i < n; i++ { + processors[i].setup() + + go func(wp *wblSubsetProcessor) { + unknown := wp.processWALSamples(h) + unknownRefs.Add(unknown) + wg.Done() + }(&processors[i]) + } + + go func() { + defer close(decoded) + for r.Next() { + rec := r.Record() + switch dec.Type(rec) { + case record.Samples: + samples := samplesPool.Get().([]record.RefSample)[:0] + samples, err = dec.Samples(rec, samples) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode samples"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- samples + case record.MmapMarkers: + markers := markersPool.Get().([]record.RefMmapMarker)[:0] + markers, err = dec.MmapMarkers(rec, markers) + if err != nil { + decodeErr = &wal.CorruptionErr{ + Err: errors.Wrap(err, "decode mmap markers"), + Segment: r.Segment(), + Offset: r.Offset(), + } + return + } + decoded <- markers + default: + // Noop. + } + } + }() + + // The records are always replayed from the oldest to the newest. + for d := range decoded { + switch v := d.(type) { + case []record.RefSample: + samples := v + // We split up the samples into parts of 5000 samples or less. + // With O(300 * #cores) in-flight sample batches, large scrapes could otherwise + // cause thousands of very large in flight buffers occupying large amounts + // of unused memory. + for len(samples) > 0 { + m := 5000 + if len(samples) < m { + m = len(samples) + } + for i := 0; i < n; i++ { + shards[i] = processors[i].reuseBuf() + } + for _, sam := range samples[:m] { + if r, ok := multiRef[sam.Ref]; ok { + sam.Ref = r + } + mod := uint64(sam.Ref) % uint64(n) + shards[mod] = append(shards[mod], sam) + } + for i := 0; i < n; i++ { + processors[i].input <- shards[i] + } + samples = samples[m:] + } + //nolint:staticcheck // Ignore SA6002 relax staticcheck verification. + samplesPool.Put(d) + case []record.RefMmapMarker: + markers := v + for _, rm := range markers { + seq, off := rm.MmapRef.Unpack() + if seq > lastSeq || (seq == lastSeq && off > lastOff) { + // This m-map chunk from markers was not present during + // the load of mmapped chunks that happened in the head + // initialization. + continue + } + + ms := h.series.getByID(rm.Ref) + if ms == nil { + unknownRefs.Inc() + continue + } + + idx := uint64(ms.ref) % uint64(n) + // It is possible that some old sample is being processed in processWALSamples that + // could cause race below. So we wait for the goroutine to empty input the buffer and finish + // processing all old samples after emptying the buffer. + processors[idx].waitUntilIdle() + // Lock the subset so we can modify the series object + processors[idx].mx.Lock() + + // All samples till now have been m-mapped. Hence clear out the headChunk. 
+ // In case some samples slipped through and went into m-map chunks because of changed + // chunk size parameters, we are not taking care of that here. + // TODO(codesome): see if there is a way to avoid duplicate m-map chunks if + // the size of ooo chunk was reduced between restart. + ms.oooHeadChunk = nil + + processors[idx].mx.Unlock() + } + default: + panic(fmt.Errorf("unexpected decoded type: %T", d)) + } + } + + if decodeErr != nil { + return decodeErr + } + + // Signal termination to each worker and wait for it to close its output channel. + for i := 0; i < n; i++ { + processors[i].closeAndDrain() + } + wg.Wait() + + if r.Err() != nil { + return errors.Wrap(r.Err(), "read records") + } + + if unknownRefs.Load() > 0 { + level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load()) + } + return nil +} + +type errLoadWbl struct { + err error +} + +func (e errLoadWbl) Error() string { + return e.err.Error() +} + +// To support errors.Cause(). +func (e errLoadWbl) Cause() error { + return e.err +} + +// To support errors.Unwrap(). +func (e errLoadWbl) Unwrap() error { + return e.err +} + +// isErrLoadOOOWal returns a boolean if the error is errLoadWbl. +func isErrLoadOOOWal(err error) bool { + _, ok := err.(*errLoadWbl) + return ok +} + +type wblSubsetProcessor struct { + mx sync.Mutex // Take this lock while modifying series in the subset. + input chan []record.RefSample + output chan []record.RefSample +} + +func (wp *wblSubsetProcessor) setup() { + wp.output = make(chan []record.RefSample, 300) + wp.input = make(chan []record.RefSample, 300) +} + +func (wp *wblSubsetProcessor) closeAndDrain() { + close(wp.input) + for range wp.output { + } +} + +// If there is a buffer in the output chan, return it for reuse, otherwise return nil. +func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample { + select { + case buf := <-wp.output: + return buf[:0] + default: + } + return nil +} + +// processWALSamples adds the samples it receives to the head and passes +// the buffer received to an output channel for reuse. +// Samples before the minValidTime timestamp are discarded. +func (wp *wblSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) { + defer close(wp.output) + + // We don't check for minValidTime for ooo samples. + + for samples := range wp.input { + wp.mx.Lock() + for _, s := range samples { + ms := h.series.getByID(s.Ref) + if ms == nil { + unknownRefs++ + continue + } + if _, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper); chunkCreated { + h.metrics.chunksCreated.Inc() + h.metrics.chunks.Inc() + } + } + wp.mx.Unlock() + wp.output <- samples + } + + return unknownRefs +} + +func (wp *wblSubsetProcessor) waitUntilIdle() { + select { + case <-wp.output: // Allow output side to drain to avoid deadlock. + default: + } + wp.input <- []record.RefSample{} + for len(wp.input) != 0 { + time.Sleep(10 * time.Microsecond) + select { + case <-wp.output: // Allow output side to drain to avoid deadlock. 
+ default: + } + } +} + const ( chunkSnapshotRecordTypeSeries uint8 = 1 chunkSnapshotRecordTypeTombstones uint8 = 2 diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go new file mode 100644 index 0000000000..be5144e4c0 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head.go @@ -0,0 +1,74 @@ +package tsdb + +import ( + "fmt" + + "github.com/prometheus/prometheus/tsdb/tombstones" +) + +var _ BlockReader = &OOORangeHead{} + +// OOORangeHead allows querying Head out of order samples via BlockReader +// interface implementation. +type OOORangeHead struct { + head *Head + // mint and maxt are tracked because when a query is handled we only want + // the timerange of the query and having preexisting pointers to the first + // and last timestamp help with that. + mint, maxt int64 +} + +func NewOOORangeHead(head *Head, mint, maxt int64) *OOORangeHead { + return &OOORangeHead{ + head: head, + mint: mint, + maxt: maxt, + } +} + +func (oh *OOORangeHead) Index() (IndexReader, error) { + return NewOOOHeadIndexReader(oh.head, oh.mint, oh.maxt), nil +} + +func (oh *OOORangeHead) Chunks() (ChunkReader, error) { + return NewOOOHeadChunkReader(oh.head, oh.mint, oh.maxt), nil +} + +func (oh *OOORangeHead) Tombstones() (tombstones.Reader, error) { + // As stated in the design doc https://docs.google.com/document/d/1Kppm7qL9C-BJB1j6yb6-9ObG3AbdZnFUBYPNNWwDBYM/edit?usp=sharing + // Tombstones are not supported for out of order metrics. + return tombstones.NewMemTombstones(), nil +} + +func (oh *OOORangeHead) Meta() BlockMeta { + var id [16]byte + copy(id[:], "____ooo_head____") + return BlockMeta{ + MinTime: oh.mint, + MaxTime: oh.maxt, + ULID: id, + Stats: BlockStats{ + NumSeries: oh.head.NumSeries(), + }, + } +} + +// Size returns the size taken by the Head block. +func (oh *OOORangeHead) Size() int64 { + return oh.head.Size() +} + +// String returns an human readable representation of the out of order range +// head. It's important to keep this function in order to avoid the struct dump +// when the head is stringified in errors or logs. +func (oh *OOORangeHead) String() string { + return fmt.Sprintf("ooo range head (mint: %d, maxt: %d)", oh.MinTime(), oh.MaxTime()) +} + +func (oh *OOORangeHead) MinTime() int64 { + return oh.mint +} + +func (oh *OOORangeHead) MaxTime() int64 { + return oh.maxt +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go new file mode 100644 index 0000000000..2261d59152 --- /dev/null +++ b/vendor/github.com/prometheus/prometheus/tsdb/ooo_head_read.go @@ -0,0 +1,409 @@ +package tsdb + +import ( + "errors" + "math" + "sort" + + "github.com/prometheus/prometheus/model/labels" + "github.com/prometheus/prometheus/storage" + "github.com/prometheus/prometheus/tsdb/chunkenc" + "github.com/prometheus/prometheus/tsdb/chunks" + "github.com/prometheus/prometheus/tsdb/index" + "github.com/prometheus/prometheus/tsdb/tombstones" +) + +var _ IndexReader = &OOOHeadIndexReader{} + +// OOOHeadIndexReader implements IndexReader so ooo samples in the head can be +// accessed. +// It also has a reference to headIndexReader so we can leverage on its +// IndexReader implementation for all the methods that remain the same. We +// decided to do this to avoid code duplication. +// The only methods that change are the ones about getting Series and Postings. 
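+// Methods that are not overridden here are served by the embedded headIndexReader unchanged.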
+type OOOHeadIndexReader struct { + *headIndexReader // A reference to the headIndexReader so we can reuse as many interface implementation as possible. +} + +func NewOOOHeadIndexReader(head *Head, mint, maxt int64) *OOOHeadIndexReader { + hr := &headIndexReader{ + head: head, + mint: mint, + maxt: maxt, + } + return &OOOHeadIndexReader{hr} +} + +func (oh *OOOHeadIndexReader) Series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta) error { + return oh.series(ref, lbls, chks, 0) +} + +// The passed lastMmapRef tells upto what max m-map chunk that we can consider. +// If it is 0, it means all chunks need to be considered. +// If it is non-0, then the oooHeadChunk must not be considered. +func (oh *OOOHeadIndexReader) series(ref storage.SeriesRef, lbls *labels.Labels, chks *[]chunks.Meta, lastMmapRef chunks.ChunkDiskMapperRef) error { + s := oh.head.series.getByID(chunks.HeadSeriesRef(ref)) + + if s == nil { + oh.head.metrics.seriesNotFound.Inc() + return storage.ErrNotFound + } + *lbls = append((*lbls)[:0], s.lset...) + + if chks == nil { + return nil + } + + s.Lock() + defer s.Unlock() + *chks = (*chks)[:0] + + tmpChks := make([]chunks.Meta, 0, len(s.oooMmappedChunks)) + + // We define these markers to track the last chunk reference while we + // fill the chunk meta. + // These markers are useful to give consistent responses to repeated queries + // even if new chunks that might be overlapping or not are added afterwards. + // Also, lastMinT and lastMaxT are initialized to the max int as a sentinel + // value to know they are unset. + var lastChunkRef chunks.ChunkRef + lastMinT, lastMaxT := int64(math.MaxInt64), int64(math.MaxInt64) + + addChunk := func(minT, maxT int64, ref chunks.ChunkRef) { + // the first time we get called is for the last included chunk. + // set the markers accordingly + if lastMinT == int64(math.MaxInt64) { + lastChunkRef = ref + lastMinT = minT + lastMaxT = maxT + } + + tmpChks = append(tmpChks, chunks.Meta{ + MinTime: minT, + MaxTime: maxT, + Ref: ref, + OOOLastRef: lastChunkRef, + OOOLastMinTime: lastMinT, + OOOLastMaxTime: lastMaxT, + }) + } + + // Collect all chunks that overlap the query range, in order from most recent to most old, + // so we can set the correct markers. + if s.oooHeadChunk != nil { + c := s.oooHeadChunk + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && lastMmapRef == 0 { + ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(len(s.oooMmappedChunks)))) + addChunk(c.minTime, c.maxTime, ref) + } + } + for i := len(s.oooMmappedChunks) - 1; i >= 0; i-- { + c := s.oooMmappedChunks[i] + if c.OverlapsClosedInterval(oh.mint, oh.maxt) && (lastMmapRef == 0 || lastMmapRef.GreaterThanOrEqualTo(c.ref)) { + ref := chunks.ChunkRef(chunks.NewHeadChunkRef(s.ref, s.oooHeadChunkID(i))) + addChunk(c.minTime, c.maxTime, ref) + } + } + + // There is nothing to do if we did not collect any chunk + if len(tmpChks) == 0 { + return nil + } + + // Next we want to sort all the collected chunks by min time so we can find + // those that overlap. + sort.Sort(metaByMinTimeAndMinRef(tmpChks)) + + // Next we want to iterate the sorted collected chunks and only return the + // chunks Meta the first chunk that overlaps with others. 
+ // Example chunks of a series: 5:(100, 200) 6:(500, 600) 7:(150, 250) 8:(550, 650) + // In the example 5 overlaps with 7 and 6 overlaps with 8 so we only want to + // to return chunk Metas for chunk 5 and chunk 6 + *chks = append(*chks, tmpChks[0]) + maxTime := tmpChks[0].MaxTime // tracks the maxTime of the previous "to be merged chunk" + for _, c := range tmpChks[1:] { + if c.MinTime > maxTime { + *chks = append(*chks, c) + maxTime = c.MaxTime + } else if c.MaxTime > maxTime { + maxTime = c.MaxTime + (*chks)[len(*chks)-1].MaxTime = c.MaxTime + } + } + + return nil +} + +type chunkMetaAndChunkDiskMapperRef struct { + meta chunks.Meta + ref chunks.ChunkDiskMapperRef + origMinT int64 + origMaxT int64 +} + +type byMinTimeAndMinRef []chunkMetaAndChunkDiskMapperRef + +func (b byMinTimeAndMinRef) Len() int { return len(b) } +func (b byMinTimeAndMinRef) Less(i, j int) bool { + if b[i].meta.MinTime == b[j].meta.MinTime { + return b[i].meta.Ref < b[j].meta.Ref + } + return b[i].meta.MinTime < b[j].meta.MinTime +} + +func (b byMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +type metaByMinTimeAndMinRef []chunks.Meta + +func (b metaByMinTimeAndMinRef) Len() int { return len(b) } +func (b metaByMinTimeAndMinRef) Less(i, j int) bool { + if b[i].MinTime == b[j].MinTime { + return b[i].Ref < b[j].Ref + } + return b[i].MinTime < b[j].MinTime +} + +func (b metaByMinTimeAndMinRef) Swap(i, j int) { b[i], b[j] = b[j], b[i] } + +func (oh *OOOHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { + switch len(values) { + case 0: + return index.EmptyPostings(), nil + case 1: + return oh.head.postings.Get(name, values[0]), nil // TODO(ganesh) Also call GetOOOPostings + default: + // TODO(ganesh) We want to only return postings for out of order series. + res := make([]index.Postings, 0, len(values)) + for _, value := range values { + res = append(res, oh.head.postings.Get(name, value)) // TODO(ganesh) Also call GetOOOPostings + } + return index.Merge(res...), nil + } +} + +type OOOHeadChunkReader struct { + head *Head + mint, maxt int64 +} + +func NewOOOHeadChunkReader(head *Head, mint, maxt int64) *OOOHeadChunkReader { + return &OOOHeadChunkReader{ + head: head, + mint: mint, + maxt: maxt, + } +} + +func (cr OOOHeadChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { + sid, _ := chunks.HeadChunkRef(meta.Ref).Unpack() + + s := cr.head.series.getByID(sid) + // This means that the series has been garbage collected. + if s == nil { + return nil, storage.ErrNotFound + } + + s.Lock() + c, err := s.oooMergedChunk(meta, cr.head.chunkDiskMapper, cr.mint, cr.maxt) + s.Unlock() + if err != nil { + return nil, err + } + + // This means that the query range did not overlap with the requested chunk. + if len(c.chunks) == 0 { + return nil, storage.ErrNotFound + } + + return c, nil +} + +func (cr OOOHeadChunkReader) Close() error { + return nil +} + +type OOOCompactionHead struct { + oooIR *OOOHeadIndexReader + lastMmapRef chunks.ChunkDiskMapperRef + lastWBLFile int + postings []storage.SeriesRef + chunkRange int64 + mint, maxt int64 // Among all the compactable chunks. +} + +// NewOOOCompactionHead does the following: +// 1. M-maps all the in-memory ooo chunks. +// 2. Compute the expected block ranges while iterating through all ooo series and store it. +// 3. Store the list of postings having ooo series. +// 4. Cuts a new WBL file for the OOO WBL. 
+// All the above together have a bit of CPU and memory overhead, and can have a bit of impact +// on the sample append latency. So call NewOOOCompactionHead only right before compaction. +func NewOOOCompactionHead(head *Head) (*OOOCompactionHead, error) { + newWBLFile, err := head.wbl.NextSegment() + if err != nil { + return nil, err + } + + ch := &OOOCompactionHead{ + chunkRange: head.chunkRange.Load(), + mint: math.MaxInt64, + maxt: math.MinInt64, + lastWBLFile: newWBLFile, + } + + ch.oooIR = NewOOOHeadIndexReader(head, math.MinInt64, math.MaxInt64) + n, v := index.AllPostingsKey() + + // TODO: verify this gets only ooo samples. + p, err := ch.oooIR.Postings(n, v) + if err != nil { + return nil, err + } + p = ch.oooIR.SortedPostings(p) + + var lastSeq, lastOff int + for p.Next() { + seriesRef := p.At() + ms := head.series.getByID(chunks.HeadSeriesRef(seriesRef)) + if ms == nil { + continue + } + + // M-map the in-memory chunk and keep track of the last one. + // Also build the block ranges -> series map. + // TODO: consider having a lock specifically for ooo data. + ms.Lock() + + mmapRef := ms.mmapCurrentOOOHeadChunk(head.chunkDiskMapper) + if mmapRef == 0 && len(ms.oooMmappedChunks) > 0 { + // Nothing was m-mapped. So take the mmapRef from the existing slice if it exists. + mmapRef = ms.oooMmappedChunks[len(ms.oooMmappedChunks)-1].ref + } + seq, off := mmapRef.Unpack() + if seq > lastSeq || (seq == lastSeq && off > lastOff) { + ch.lastMmapRef, lastSeq, lastOff = mmapRef, seq, off + } + if len(ms.oooMmappedChunks) > 0 { + ch.postings = append(ch.postings, seriesRef) + for _, c := range ms.oooMmappedChunks { + if c.minTime < ch.mint { + ch.mint = c.minTime + } + if c.maxTime > ch.maxt { + ch.maxt = c.maxTime + } + } + } + ms.Unlock() + } + + return ch, nil +} + +func (ch *OOOCompactionHead) Index() (IndexReader, error) { + return NewOOOCompactionHeadIndexReader(ch), nil +} + +func (ch *OOOCompactionHead) Chunks() (ChunkReader, error) { + return NewOOOHeadChunkReader(ch.oooIR.head, ch.oooIR.mint, ch.oooIR.maxt), nil +} + +func (ch *OOOCompactionHead) Tombstones() (tombstones.Reader, error) { + return tombstones.NewMemTombstones(), nil +} + +func (ch *OOOCompactionHead) Meta() BlockMeta { + var id [16]byte + copy(id[:], "copy(id[:], \"ooo_compact_head\")") + return BlockMeta{ + MinTime: ch.mint, + MaxTime: ch.maxt, + ULID: id, + Stats: BlockStats{ + NumSeries: uint64(len(ch.postings)), + }, + } +} + +// CloneForTimeRange clones the OOOCompactionHead such that the IndexReader and ChunkReader +// obtained from this only looks at the m-map chunks within the given time ranges while not looking +// beyond the ch.lastMmapRef. +// Only the method of BlockReader interface are valid for the cloned OOOCompactionHead. 
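+// The clone shares the postings list and lastMmapRef with the original; only the time range of the
+// embedded index reader (and hence of the readers returned by Index() and Chunks()) changes.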
+func (ch *OOOCompactionHead) CloneForTimeRange(mint, maxt int64) *OOOCompactionHead { + return &OOOCompactionHead{ + oooIR: NewOOOHeadIndexReader(ch.oooIR.head, mint, maxt), + lastMmapRef: ch.lastMmapRef, + postings: ch.postings, + chunkRange: ch.chunkRange, + mint: ch.mint, + maxt: ch.maxt, + } +} + +func (ch *OOOCompactionHead) Size() int64 { return 0 } +func (ch *OOOCompactionHead) MinTime() int64 { return ch.mint } +func (ch *OOOCompactionHead) MaxTime() int64 { return ch.maxt } +func (ch *OOOCompactionHead) ChunkRange() int64 { return ch.chunkRange } +func (ch *OOOCompactionHead) LastMmapRef() chunks.ChunkDiskMapperRef { return ch.lastMmapRef } +func (ch *OOOCompactionHead) LastWBLFile() int { return ch.lastWBLFile } + +type OOOCompactionHeadIndexReader struct { + ch *OOOCompactionHead +} + +func NewOOOCompactionHeadIndexReader(ch *OOOCompactionHead) IndexReader { + return &OOOCompactionHeadIndexReader{ch: ch} +} + +func (ir *OOOCompactionHeadIndexReader) Symbols() index.StringIter { + return ir.ch.oooIR.Symbols() +} + +func (ir *OOOCompactionHeadIndexReader) Postings(name string, values ...string) (index.Postings, error) { + n, v := index.AllPostingsKey() + if name != n || len(values) != 1 || values[0] != v { + return nil, errors.New("only AllPostingsKey is supported") + } + return index.NewListPostings(ir.ch.postings), nil +} + +func (ir *OOOCompactionHeadIndexReader) SortedPostings(p index.Postings) index.Postings { + // This will already be sorted from the Postings() call above. + return p +} + +func (ir *OOOCompactionHeadIndexReader) ShardedPostings(p index.Postings, shardIndex, shardCount uint64) index.Postings { + return ir.ch.oooIR.ShardedPostings(p, shardIndex, shardCount) +} + +func (ir *OOOCompactionHeadIndexReader) Series(ref storage.SeriesRef, lset *labels.Labels, chks *[]chunks.Meta) error { + return ir.ch.oooIR.series(ref, lset, chks, ir.ch.lastMmapRef) +} + +func (ir *OOOCompactionHeadIndexReader) SortedLabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelValues(name string, matchers ...*labels.Matcher) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) PostingsForMatchers(concurrent bool, ms ...*labels.Matcher) (index.Postings, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelNames(matchers ...*labels.Matcher) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelValueFor(id storage.SeriesRef, label string) (string, error) { + return "", errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) LabelNamesFor(ids ...storage.SeriesRef) ([]string, error) { + return nil, errors.New("not implemented") +} + +func (ir *OOOCompactionHeadIndexReader) Close() error { + return ir.ch.oooIR.Close() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/querier.go b/vendor/github.com/prometheus/prometheus/tsdb/querier.go index e45ff29880..e419a59d0c 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/querier.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/querier.go @@ -518,7 +518,7 @@ func (p *populateWithDelGenericSeriesIterator) next() bool { p.i++ p.currChkMeta = p.chks[p.i] - p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta.Ref) + p.currChkMeta.Chunk, p.err = p.chunks.Chunk(p.currChkMeta) if p.err != nil { p.err = errors.Wrapf(p.err, "cannot 
populate chunk %d", p.currChkMeta.Ref) return false @@ -847,7 +847,7 @@ func newNopChunkReader() ChunkReader { } } -func (cr nopChunkReader) Chunk(ref chunks.ChunkRef) (chunkenc.Chunk, error) { +func (cr nopChunkReader) Chunk(meta chunks.Meta) (chunkenc.Chunk, error) { return cr.emptyChunk, nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go index 12a4047396..9a9fe0a1c2 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/record/record.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/record/record.go @@ -42,6 +42,8 @@ const ( Tombstones Type = 3 // Exemplars is used to match WAL records of type Exemplars. Exemplars Type = 4 + // MmapMarkers is used to match OOO WBL records of type MmapMarkers. + MmapMarkers Type = 5 ) // ErrNotFound is returned if a looked up resource was not found. Duplicate ErrNotFound from head.go. @@ -68,6 +70,12 @@ type RefExemplar struct { Labels labels.Labels } +// RefMmapMarker marks that the all the samples of the given series until now have been m-mapped to disk. +type RefMmapMarker struct { + Ref chunks.HeadSeriesRef + MmapRef chunks.ChunkDiskMapperRef +} + // Decoder decodes series, sample, and tombstone records. // The zero value is ready to use. type Decoder struct{} @@ -79,7 +87,7 @@ func (d *Decoder) Type(rec []byte) Type { return Unknown } switch t := Type(rec[0]); t { - case Series, Samples, Tombstones, Exemplars: + case Series, Samples, Tombstones, Exemplars, MmapMarkers: return t } return Unknown @@ -223,6 +231,34 @@ func (d *Decoder) ExemplarsFromBuffer(dec *encoding.Decbuf, exemplars []RefExemp return exemplars, nil } +func (d *Decoder) MmapMarkers(rec []byte, markers []RefMmapMarker) ([]RefMmapMarker, error) { + dec := encoding.Decbuf{B: rec} + t := Type(dec.Byte()) + if t != MmapMarkers { + return nil, errors.New("invalid record type") + } + + if dec.Len() == 0 { + return markers, nil + } + for len(dec.B) > 0 && dec.Err() == nil { + ref := chunks.HeadSeriesRef(dec.Be64()) + mmapRef := chunks.ChunkDiskMapperRef(dec.Be64()) + markers = append(markers, RefMmapMarker{ + Ref: ref, + MmapRef: mmapRef, + }) + } + + if dec.Err() != nil { + return nil, errors.Wrapf(dec.Err(), "decode error after %d mmap markers", len(markers)) + } + if len(dec.B) > 0 { + return nil, errors.Errorf("unexpected %d bytes left in entry", len(dec.B)) + } + return markers, nil +} + // Encoder encodes series, sample, and tombstones records. // The zero value is ready to use. 
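+// It can also encode MmapMarkers records used by the OOO WBL (see the MmapMarkers method below).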
type Encoder struct{} @@ -316,3 +352,15 @@ func (e *Encoder) EncodeExemplarsIntoBuffer(exemplars []RefExemplar, buf *encodi } } } + +func (e *Encoder) MmapMarkers(markers []RefMmapMarker, b []byte) []byte { + buf := encoding.Encbuf{B: b} + buf.PutByte(byte(MmapMarkers)) + + for _, s := range markers { + buf.PutBE64(uint64(s.Ref)) + buf.PutBE64(uint64(s.MmapRef)) + } + + return buf.Get() +} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go index ace6a99566..9031ec1628 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/wal/wal.go @@ -40,6 +40,7 @@ const ( DefaultSegmentSize = 128 * 1024 * 1024 // 128 MB pageSize = 32 * 1024 // 32KB recordHeaderSize = 7 + WblDirName = "wbl" ) // The table gets initialized with sync.Once but may still cause a race @@ -200,36 +201,41 @@ type walMetrics struct { writesFailed prometheus.Counter } -func newWALMetrics(r prometheus.Registerer) *walMetrics { +func newWALMetrics(r prometheus.Registerer, isOOO bool) *walMetrics { m := &walMetrics{} + prefix := "prometheus_tsdb_wal" + if isOOO { + prefix = "prometheus_tsdb_out_of_order_wal" + } + m.fsyncDuration = prometheus.NewSummary(prometheus.SummaryOpts{ - Name: "prometheus_tsdb_wal_fsync_duration_seconds", + Name: fmt.Sprintf("%s_fsync_duration_seconds", prefix), Help: "Duration of WAL fsync.", Objectives: map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001}, }) m.pageFlushes = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_page_flushes_total", + Name: fmt.Sprintf("%s_page_flushes_total", prefix), Help: "Total number of page flushes.", }) m.pageCompletions = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_completed_pages_total", + Name: fmt.Sprintf("%s_completed_pages_total", prefix), Help: "Total number of completed pages.", }) m.truncateFail = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_truncations_failed_total", + Name: fmt.Sprintf("%s_truncations_failed_total", prefix), Help: "Total number of WAL truncations that failed.", }) m.truncateTotal = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_truncations_total", + Name: fmt.Sprintf("%s_truncations_total", prefix), Help: "Total number of WAL truncations attempted.", }) m.currentSegment = prometheus.NewGauge(prometheus.GaugeOpts{ - Name: "prometheus_tsdb_wal_segment_current", + Name: fmt.Sprintf("%s_segment_current", prefix), Help: "WAL segment index that TSDB is currently writing to.", }) m.writesFailed = prometheus.NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_wal_writes_failed_total", + Name: fmt.Sprintf("%s_writes_failed_total", prefix), Help: "Total number of WAL writes that failed.", }) @@ -274,7 +280,12 @@ func NewSize(logger log.Logger, reg prometheus.Registerer, dir string, segmentSi stopc: make(chan chan struct{}), compress: compress, } - w.metrics = newWALMetrics(reg) + isOOO := false + if filepath.Base(dir) == WblDirName { + // TODO(codesome): have a less hacky way to do it. + isOOO = true + } + w.metrics = newWALMetrics(reg, isOOO) _, last, err := Segments(w.Dir()) if err != nil { @@ -460,31 +471,33 @@ func SegmentName(dir string, i int) string { } // NextSegment creates the next segment and closes the previous one. -func (w *WAL) NextSegment() error { +// It returns the file number of the new file. 
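+// The OOO compaction head records this index (as lastWBLFile) when it cuts a new WBL segment in NewOOOCompactionHead.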
+func (w *WAL) NextSegment() (int, error) { w.mtx.Lock() defer w.mtx.Unlock() return w.nextSegment() } // nextSegment creates the next segment and closes the previous one. -func (w *WAL) nextSegment() error { +// It returns the file number of the new file. +func (w *WAL) nextSegment() (int, error) { if w.closed { - return errors.New("wal is closed") + return 0, errors.New("wal is closed") } // Only flush the current page if it actually holds data. if w.page.alloc > 0 { if err := w.flushPage(true); err != nil { - return err + return 0, err } } next, err := CreateSegment(w.Dir(), w.segment.Index()+1) if err != nil { - return errors.Wrap(err, "create new segment file") + return 0, errors.Wrap(err, "create new segment file") } prev := w.segment if err := w.setSegment(next); err != nil { - return err + return 0, err } // Don't block further writes by fsyncing the last segment. @@ -496,7 +509,7 @@ func (w *WAL) nextSegment() error { level.Error(w.logger).Log("msg", "close previous segment", "err", err) } } - return nil + return next.Index(), nil } func (w *WAL) setSegment(segment *Segment) error { @@ -638,7 +651,7 @@ func (w *WAL) log(rec []byte, final bool) error { left += (pageSize - recordHeaderSize) * (w.pagesPerSegment() - w.donePages - 1) // Free pages in the active segment. if len(rec) > left { - if err := w.nextSegment(); err != nil { + if _, err := w.nextSegment(); err != nil { return err } } diff --git a/vendor/github.com/thanos-io/thanos/pkg/block/index.go b/vendor/github.com/thanos-io/thanos/pkg/block/index.go index 830ac89718..7ef37f9862 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/block/index.go +++ b/vendor/github.com/thanos-io/thanos/pkg/block/index.go @@ -579,7 +579,7 @@ func rewrite( sort.Sort(lset) for i, c := range chks { - chks[i].Chunk, err = chunkr.Chunk(c.Ref) + chks[i].Chunk, err = chunkr.Chunk(c) if err != nil { return errors.Wrap(err, "chunk read") } diff --git a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go index f656a796c5..a8b1cd5ef1 100644 --- a/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go +++ b/vendor/github.com/thanos-io/thanos/pkg/compact/downsample/downsample.go @@ -128,7 +128,7 @@ func Downsample( // While #183 exists, we sanitize the chunks we retrieved from the block // before retrieving their samples. 
for i, c := range chks { - chk, err := chunkr.Chunk(c.Ref) + chk, err := chunkr.Chunk(c) if err != nil { return id, errors.Wrapf(err, "get chunk %d, series %d", c.Ref, postings.At()) } diff --git a/vendor/modules.txt b/vendor/modules.txt index be715c7b4b..6d57f111d4 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -717,7 +717,7 @@ github.com/prometheus/node_exporter/https github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220308163432-03831554a519 => github.com/grafana/mimir-prometheus v0.0.0-20220614075514-f2aba4af80e4 +# github.com/prometheus/prometheus v1.8.2-0.20220308163432-03831554a519 => github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 ## explicit; go 1.17 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -807,7 +807,7 @@ github.com/stretchr/objx github.com/stretchr/testify/assert github.com/stretchr/testify/mock github.com/stretchr/testify/require -# github.com/thanos-io/thanos v0.26.1-0.20220602051129-a6f6ce060ed4 +# github.com/thanos-io/thanos v0.26.1-0.20220602051129-a6f6ce060ed4 => github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 ## explicit; go 1.17 github.com/thanos-io/thanos/pkg/block github.com/thanos-io/thanos/pkg/block/metadata @@ -1227,7 +1227,8 @@ gopkg.in/yaml.v2 gopkg.in/yaml.v3 # git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220614075514-f2aba4af80e4 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 +# github.com/thanos-io/thanos => github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 # github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0 # github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2 # github.com/hashicorp/memberlist => github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167 From a09bb62c68ddef4cdebe1a32616dcf96a1842fa6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Miguel=20=C3=81ngel=20Ortu=C3=B1o?= Date: Thu, 23 Jun 2022 11:41:26 +0200 Subject: [PATCH 31/63] helm: use custom memcached templates (#2064) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * helm: use own memcached templates * Remove chart testing dependency Bitnami is no longer in use. 
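A rough before/after for a minimal values override, based on the renames listed in the chart CHANGELOG of this change (only the chunks and index-queries caches are shown as a sketch; the same pattern applies to the other memcached instances):

    # before: bitnami subcharts
    memcached:
      enabled: true
      replicaCount: 2
    memcached-queries:
      enabled: true
      replicaCount: 3

    # after: built-in templates
    memcached-chunks:
      enabled: true
      replicas: 2
    memcached-index-queries:
      enabled: true
      replicas: 3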
Signed-off-by: Miguel Ángel Ortuño Co-authored-by: György Krajcsovits Signed-off-by: György Krajcsovits --- .../migration-guide/migrating-from-cortex.md | 16 +- .../charts/mimir-distributed/CHANGELOG.md | 14 + .../helm/charts/mimir-distributed/Chart.lock | 16 +- .../helm/charts/mimir-distributed/Chart.yaml | 20 - .../helm/charts/mimir-distributed/README.md | 4 - .../mimir-distributed/capped-large.yaml | 8 +- .../mimir-distributed/capped-small.yaml | 8 +- .../mimir-distributed/ci/test-oss-values.yaml | 28 ++ .../helm/charts/mimir-distributed/large.yaml | 10 +- .../helm/charts/mimir-distributed/small.yaml | 8 +- .../memcached-chunks-pdb.yaml | 1 + .../memcached-chunks-servmon.yaml | 3 + .../memcached-chunks-statefulset.yaml | 1 + .../memcached-chunks-svc-headless.yaml | 1 + .../memcached-index-queries-pdb.yaml | 1 + .../memcached-index-queries-servmon.yaml | 3 + .../memcached-index-queries-statefulset.yaml | 1 + .../memcached-index-queries-svc-headless.yaml | 1 + .../memcached-metadata-pdb.yaml | 1 + .../memcached-metadata-servmon.yaml | 3 + .../memcached-metadata-statefulset.yaml | 1 + .../memcached-metadata-svc-headless.yaml | 1 + .../memcached-results-pdb.yaml | 1 + .../memcached-results-servmon.yaml | 3 + .../memcached-results-statefulset.yaml | 1 + .../memcached-results-svc-headless.yaml | 1 + .../templates/memcached/_memcached-pdb.tpl | 23 + .../memcached/_memcached-statefulset.tpl | 116 ++++++ .../templates/memcached/_memcached-svc.tpl | 35 ++ .../helm/charts/mimir-distributed/values.yaml | 393 ++++++++++++------ operations/helm/ct.yaml | 2 - .../memcached-chunks-statefulset.yaml | 91 ++++ .../memcached-chunks-svc-headless.yaml | 31 ++ .../memcached-index-queries-statefulset.yaml | 91 ++++ .../memcached-index-queries-svc-headless.yaml | 31 ++ .../memcached-metadata-statefulset.yaml | 91 ++++ .../memcached-metadata-svc-headless.yaml | 31 ++ .../memcached-results-statefulset.yaml | 91 ++++ .../memcached-results-svc-headless.yaml | 31 ++ .../templates/mimir-config.yaml | 22 + 40 files changed, 1044 insertions(+), 191 deletions(-) create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-pdb.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-servmon.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-pdb.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-servmon.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-pdb.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-servmon.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml create mode 100644 
operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-pdb.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-servmon.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached/_memcached-pdb.tpl create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached/_memcached-statefulset.tpl create mode 100644 operations/helm/charts/mimir-distributed/templates/memcached/_memcached-svc.tpl create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml create mode 100644 operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml diff --git a/docs/sources/migration-guide/migrating-from-cortex.md b/docs/sources/migration-guide/migrating-from-cortex.md index fffee04e3d..e2bfa32809 100644 --- a/docs/sources/migration-guide/migrating-from-cortex.md +++ b/docs/sources/migration-guide/migrating-from-cortex.md @@ -260,29 +260,29 @@ You can update to the Grafana Mimir Helm chart from the Cortex Helm chart. mimir: config: | blocks_storage: - {{- if .Values.memcached.enabled }} + {{- if index .Values "memcached-chunks" "enabled" }} chunks_cache: backend: "memcached" memcached: - addresses: "dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc:11211" - max_item_size: {{ .Values.memcached.maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . }}-memcached-chunks.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-chunks").port }} + max_item_size: {{ mul (index .Values "memcached-chunks").maxItemMemory 1024 1024 }} {{- end }} {{- if index .Values "memcached-metadata" "enabled" }} metadata_cache: backend: "memcached" memcached: - addresses: "dns+{{ .Release.Name }}-memcached-metadata.{{ .Release.Namespace }}.svc:11211" - max_item_size: {{ (index .Values "memcached-metadata").maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . 
}}-memcached-metadata.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-metadata").port }} + max_item_size: {{ mul (index .Values "memcached-metadata").maxItemMemory 1024 1024 }} {{- end }} {{- if index .Values "memcached-queries" "enabled" }} index_cache: backend: "memcached" memcached: - addresses: "dns+{{ .Release.Name }}-memcached-queries.{{ .Release.Namespace }}.svc:11211" - max_item_size: {{ (index .Values "memcached-queries").maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . }}-memcached-index-queries.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-index-queries").port }} + max_item_size: {{ mul (index .Values "memcached-index-queries").maxItemMemory 1024 1024 }} {{- end }} frontend_worker: - frontend_address: "{{ template "mimir.fullname" . }}-query-frontend-headless.{{ .Release.Namespace }}.svc:{{ include "mimir.serverGrpcListenPort" . }}" + frontend_address: "{{ template "mimir.fullname" . }}-query-frontend.{{ .Release.Namespace }}.svc:{{ include "mimir.serverGrpcListenPort" . }}" ingester: ring: num_tokens: 512 diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index 38aa634820..bdcdf8c70e 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -13,6 +13,20 @@ Entries should include a reference to the Pull Request that introduced the chang ## main / unreleased * [CHANGE] Enable multi-tenancy by default. This means `multitenancy_enabled` is now `true` for both Mimir and Enterprise Metrics. Nginx will inject `X-Scope-OrgID=anonymous` header if the header is not present, ensuring backwards compatibility. #2117 +* [CHANGE] **breaking change** Chart now uses custom memcached templates to remove bitnami dependency. There are changes to the Helm values, listed bellow. #2064 + - The `memcached` section now contains common values shared across all memcached instances. + - New `memcachedExporter` section was added to configure memcached metrics exporter. + - New `memcached-chunks` section was added that refers to previous `memcached` configuration. + - The section `memcached-queries` is renamed to `memcached-index-queries`. + - The value `memcached-*.replicaCount` is replaced with `memcached-*.replicas` to align with the rest of the services. + - Renamed `memcached.replicaCount` to `memcached-chunks.replicas`. + - Renamed `memcached-queries.replicaCount` to `memcached-index-queries.replicas`. + - Renamed `memcached-metadata.replicaCount` to `memcached-metadata.replicas`. + - Renamed `memcached-results.replicaCount` to `memcached-results.replicas`. + - All memcached instances now share the same `ServiceAccount` that the chart uses for its services. + - The value `memcached-*.architecture` was removed. + - The value `memcached-*.arguments` was removed, the default arguments are now encoded in the template. Use `memcached-*.extraArgs` to provide additional arguments as well as the values `memcached-*.allocatedMemory`, `memcached-*.maxItemMemory` and `memcached-*.port` to set the memcached command line flags `-m`, `-I` and `-u`. + - The remaining arguments are aligned with the rest of the chart's services, please consult the values file to check whether a parameter exists or was renamed. * [CHANGE] Change default value for `blocks_storage.bucket_store.chunks_cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 * [FEATURE] Add `mimir-continuous-test` in smoke-test mode. 
Use `helm test` to run a smoke test of the read + write path.
* [ENHANCEMENT] ServiceMonitor object will now have default values based on release namespace in the `namespace` and `namespaceSelector` fields. #2123
diff --git a/operations/helm/charts/mimir-distributed/Chart.lock b/operations/helm/charts/mimir-distributed/Chart.lock
index 9c5179eb26..2c58560520 100644
--- a/operations/helm/charts/mimir-distributed/Chart.lock
+++ b/operations/helm/charts/mimir-distributed/Chart.lock
@@ -1,18 +1,6 @@
 dependencies:
-- name: memcached
-  repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-  version: 5.5.2
-- name: memcached
-  repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-  version: 5.5.2
-- name: memcached
-  repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-  version: 5.5.2
-- name: memcached
-  repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-  version: 5.5.2
 - name: minio
   repository: https://helm.min.io/
   version: 8.0.10
-digest: sha256:0b58716cf86880510e4ce9dacb312db80918d14704e68127b833b81b5fd7d7f3
-generated: "2022-06-02T09:31:09.709064-05:00"
+digest: sha256:826b6cc453742c71c2159500596d78666fbdf0ff3ed105caa7ca162ecbd36a45
+generated: "2022-06-09T08:29:05.191797+02:00"
diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml
index a5d4913ee9..f6722f32c7 100644
--- a/operations/helm/charts/mimir-distributed/Chart.yaml
+++ b/operations/helm/charts/mimir-distributed/Chart.yaml
@@ -8,26 +8,6 @@ icon: https://grafana.com/static/img/logos/logo-mimir.svg
 kubeVersion: ^1.10.0-0
 name: mimir-distributed
 dependencies:
-  - name: memcached
-    alias: memcached
-    version: 5.5.2
-    repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-    condition: memcached.enabled
-  - name: memcached
-    alias: memcached-queries
-    version: 5.5.2
-    repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-    condition: memcached-queries.enabled
-  - name: memcached
-    alias: memcached-metadata
-    version: 5.5.2
-    repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-    condition: memcached-metadata.enabled
-  - name: memcached
-    alias: memcached-results
-    version: 5.5.2
-    repository: https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami
-    condition: memcached-results.enabled
   - name: minio
     alias: minio
     version: 8.0.10
diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md
index d406d51057..6599b9fd75 100644
--- a/operations/helm/charts/mimir-distributed/README.md
+++ b/operations/helm/charts/mimir-distributed/README.md
@@ -15,10 +15,6 @@ Kubernetes: `^1.10.0-0`
 | Repository | Name | Version |
 |------------|------|---------|
 | https://helm.min.io/ | minio(minio) | 8.0.10 |
-| https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami | memcached(memcached) | 5.5.2 |
-| https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami | memcached-queries(memcached) | 5.5.2 |
-| https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami | memcached-metadata(memcached)
| 5.5.2 | -| https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami | memcached-results(memcached) | 5.5.2 | ## Dependencies diff --git a/operations/helm/charts/mimir-distributed/capped-large.yaml b/operations/helm/charts/mimir-distributed/capped-large.yaml index 6a196584d7..245789e3fb 100644 --- a/operations/helm/charts/mimir-distributed/capped-large.yaml +++ b/operations/helm/charts/mimir-distributed/capped-large.yaml @@ -71,13 +71,13 @@ ingester: - ingester topologyKey: 'kubernetes.io/hostname' -memcached: +memcached-chunks: enabled: true - replicaCount: 32 + replicas: 32 -memcached-queries: +memcached-index-queries: enabled: true - replicaCount: 10 + replicas: 10 memcached-metadata: enabled: true diff --git a/operations/helm/charts/mimir-distributed/capped-small.yaml b/operations/helm/charts/mimir-distributed/capped-small.yaml index e864a30edb..b97038c517 100644 --- a/operations/helm/charts/mimir-distributed/capped-small.yaml +++ b/operations/helm/charts/mimir-distributed/capped-small.yaml @@ -71,13 +71,13 @@ ingester: - ingester topologyKey: 'kubernetes.io/hostname' -memcached: +memcached-chunks: enabled: true - replicaCount: 2 + replicas: 2 -memcached-queries: +memcached-index-queries: enabled: true - replicaCount: 3 + replicas: 3 memcached-metadata: enabled: true diff --git a/operations/helm/charts/mimir-distributed/ci/test-oss-values.yaml b/operations/helm/charts/mimir-distributed/ci/test-oss-values.yaml index 8e4320d681..b2c6fb2086 100644 --- a/operations/helm/charts/mimir-distributed/ci/test-oss-values.yaml +++ b/operations/helm/charts/mimir-distributed/ci/test-oss-values.yaml @@ -49,3 +49,31 @@ store_gateway: testing: minio: use_secret: true + +memcached-chunks: + enabled: true + allocatedMemory: 10 + resources: + requests: + cpu: 10m + +memcached-index-queries: + enabled: true + allocatedMemory: 30 + resources: + requests: + cpu: 10m + +memcached-metadata: + enabled: true + allocatedMemory: 10 + resources: + requests: + cpu: 10m + +memcached-results: + enabled: true + allocatedMemory: 10 + resources: + requests: + cpu: 10m diff --git a/operations/helm/charts/mimir-distributed/large.yaml b/operations/helm/charts/mimir-distributed/large.yaml index 47583be960..8daebdc4f6 100644 --- a/operations/helm/charts/mimir-distributed/large.yaml +++ b/operations/helm/charts/mimir-distributed/large.yaml @@ -69,20 +69,20 @@ ingester: topologyKey: 'kubernetes.io/hostname' -memcached: +memcached-chunks: enabled: true - replicaCount: 32 + replicas: 32 -memcached-queries: +memcached-index-queries: enabled: true - replicaCount: 10 + replicas: 10 memcached-metadata: enabled: true memcached-results: enabled: true - replicaCount: 4 + replicas: 4 minio: enabled: false diff --git a/operations/helm/charts/mimir-distributed/small.yaml b/operations/helm/charts/mimir-distributed/small.yaml index db961c2086..f688078513 100644 --- a/operations/helm/charts/mimir-distributed/small.yaml +++ b/operations/helm/charts/mimir-distributed/small.yaml @@ -69,13 +69,13 @@ ingester: topologyKey: 'kubernetes.io/hostname' -memcached: +memcached-chunks: enabled: true - replicaCount: 2 + replicas: 2 -memcached-queries: +memcached-index-queries: enabled: true - replicaCount: 3 + replicas: 3 memcached-metadata: enabled: true diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-pdb.yaml new file mode 100644 index 0000000000..df05627969 --- 
/dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-pdb.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.podDisruptionBudget" (dict "ctx" $ "component" "memcached-chunks" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-servmon.yaml new file mode 100644 index 0000000000..0e73850099 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-servmon.yaml @@ -0,0 +1,3 @@ +{{- if index .Values "memcached-chunks" "enabled" }} +{{- include "mimir.lib.serviceMonitor" (dict "ctx" $ "component" "memcached-chunks") }} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml new file mode 100644 index 0000000000..530c79a1fa --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.statefulSet" (dict "ctx" $ "component" "memcached-chunks" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml new file mode 100644 index 0000000000..e9c49e0aed --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.service" (dict "ctx" $ "component" "memcached-chunks" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-pdb.yaml new file mode 100644 index 0000000000..0cae6bdb20 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-pdb.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.podDisruptionBudget" (dict "ctx" $ "component" "memcached-index-queries" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-servmon.yaml new file mode 100644 index 0000000000..431202742f --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-servmon.yaml @@ -0,0 +1,3 @@ +{{- if index .Values "memcached-index-queries" "enabled" }} +{{- include "mimir.lib.serviceMonitor" (dict "ctx" $ "component" "memcached-index-queries") }} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml new file mode 100644 index 0000000000..44832230d5 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.statefulSet" (dict "ctx" $ "component" "memcached-index-queries" ) }} diff --git 
a/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml new file mode 100644 index 0000000000..d4fb5815b0 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.service" (dict "ctx" $ "component" "memcached-index-queries" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-pdb.yaml new file mode 100644 index 0000000000..5d7f058640 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-pdb.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.podDisruptionBudget" (dict "ctx" $ "component" "memcached-metadata" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-servmon.yaml new file mode 100644 index 0000000000..a8d97306d7 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-servmon.yaml @@ -0,0 +1,3 @@ +{{- if index .Values "memcached-metadata" "enabled" }} +{{- include "mimir.lib.serviceMonitor" (dict "ctx" $ "component" "memcached-metadata") }} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml new file mode 100644 index 0000000000..e0f11524c7 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.statefulSet" (dict "ctx" $ "component" "memcached-metadata" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml new file mode 100644 index 0000000000..25b557049b --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.service" (dict "ctx" $ "component" "memcached-metadata" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-pdb.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-pdb.yaml new file mode 100644 index 0000000000..a87e5f95ed --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-pdb.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.podDisruptionBudget" (dict "ctx" $ "component" "memcached-results" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-servmon.yaml new file mode 100644 index 0000000000..a9662b548c --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-servmon.yaml @@ -0,0 +1,3 @@ +{{- if index .Values "memcached-results" 
"enabled" }} +{{- include "mimir.lib.serviceMonitor" (dict "ctx" $ "component" "memcached-results") }} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml new file mode 100644 index 0000000000..cdbb4042fa --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.statefulSet" (dict "ctx" $ "component" "memcached-results" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml new file mode 100644 index 0000000000..e8de28cf12 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml @@ -0,0 +1 @@ +{{- include "mimir.memcached.service" (dict "ctx" $ "component" "memcached-results" ) }} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-pdb.tpl b/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-pdb.tpl new file mode 100644 index 0000000000..e25902abe6 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-pdb.tpl @@ -0,0 +1,23 @@ +{{/* +memcached PodDisruptionBudget +*/}} +{{- define "mimir.memcached.podDisruptionBudget" -}} +{{ with (index $.ctx.Values $.component) }} +{{- if .enabled -}} +{{- if .podDisruptionBudget -}} +apiVersion: {{ include "mimir.podDisruptionBudget.apiVersion" $ }} +kind: PodDisruptionBudget +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $.ctx "component" $.component) }} + labels: + {{- include "mimir.labels" (dict "ctx" $.ctx "component" $.component) | nindent 4 }} + namespace: {{ $.ctx.Release.Namespace | quote }} +spec: + selector: + matchLabels: + {{- include "mimir.selectorLabels" (dict "ctx" $.ctx "component" $.component) | nindent 6 }} +{{ toYaml .podDisruptionBudget | indent 2 }} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-statefulset.tpl b/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-statefulset.tpl new file mode 100644 index 0000000000..ec08c05ad2 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-statefulset.tpl @@ -0,0 +1,116 @@ +{{/* +memcached StatefulSet +*/}} +{{- define "mimir.memcached.statefulSet" -}} +{{ with (index $.ctx.Values $.component) }} +{{- if .enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $.ctx "component" $.component) }} + labels: + {{- include "mimir.labels" (dict "ctx" $.ctx "component" "memcached") | nindent 4 }} + annotations: + {{- toYaml .annotations | nindent 4 }} + namespace: {{ $.ctx.Release.Namespace | quote }} +spec: + podManagementPolicy: {{ .podManagementPolicy }} + replicas: {{ .replicas }} + selector: + matchLabels: + {{- include "mimir.selectorLabels" (dict "ctx" $.ctx "component" $.component) | nindent 6 }} + updateStrategy: + {{- toYaml .statefulStrategy | nindent 4 }} + serviceName: {{ template "mimir.fullname" $.ctx }}-{{ $.component }} + + template: + metadata: + labels: + {{- include "mimir.podLabels" (dict "ctx" $.ctx "component" $.component) | nindent 8 }} + 
{{- with .podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + {{- with $.ctx.Values.global.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + + spec: + serviceAccountName: {{ template "mimir.serviceAccountName" $.ctx }} + {{- if .priorityClassName }} + priorityClassName: {{ .priorityClassName }} + {{- end }} + securityContext: + {{- toYaml $.ctx.Values.memcached.podSecurityContext | nindent 8 }} + initContainers: + {{- toYaml .initContainers | nindent 8 }} + nodeSelector: + {{- toYaml .nodeSelector | nindent 8 }} + affinity: + {{- toYaml .affinity | nindent 8 }} + tolerations: + {{- toYaml .tolerations | nindent 8 }} + terminationGracePeriodSeconds: {{ .terminationGracePeriodSeconds }} + containers: + {{- if .extraContainers }} + {{ toYaml .extraContainers | nindent 8 }} + {{- end }} + - name: memcached + {{- with $.ctx.Values.memcached.image }} + image: {{ .repository }}:{{ .tag }} + imagePullPolicy: {{ .pullPolicy }} + {{- end }} + resources: + {{- if .resources }} + {{- toYaml .resources | nindent 12 }} + {{- else }} + limits: + memory: {{ round (mulf .allocatedMemory 1.2) 0 }}Mi + requests: + cpu: 500m + memory: {{ round (mulf .allocatedMemory 1.2) 0 }}Mi + {{- end }} + ports: + - containerPort: {{ .port }} + name: client + args: + - -m {{ .allocatedMemory }} + - -o + - modern + - -I {{ .maxItemMemory }}m + - -c 16384 + - -v + - -u {{ .port }} + {{- range $key, $value := .extraArgs }} + - "-{{ $key }} {{ $value }}" + {{- end }} + env: + {{- with $.ctx.Values.global.extraEnv }} + {{ toYaml . | nindent 12 }} + {{- end }} + envFrom: + {{- with $.ctx.Values.global.extraEnvFrom }} + {{- toYaml . | nindent 12 }} + {{- end }} + securityContext: + {{- toYaml $.ctx.Values.memcached.containerSecurityContext | nindent 12 }} + + {{- if $.ctx.Values.memcachedExporter.enabled }} + - name: exporter + {{- with $.ctx.Values.memcachedExporter.image }} + image: {{ .repository}}:{{ .tag }} + imagePullPolicy: {{ .pullPolicy }} + {{- end }} + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:{{ .port }}" + - "--web.listen-address=0.0.0.0:9150" + {{- end }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-svc.tpl b/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-svc.tpl new file mode 100644 index 0000000000..b70862a6af --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/memcached/_memcached-svc.tpl @@ -0,0 +1,35 @@ +{{/* +memcached Service +*/}} +{{- define "mimir.memcached.service" -}} +{{ with (index $.ctx.Values $.component) }} +{{- if .enabled -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $.ctx "component" $.component) }} + labels: + {{- include "mimir.labels" (dict "ctx" $.ctx "component" $.component) | nindent 4 }} + {{- with .service.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- toYaml .service.annotations | nindent 4 }} + namespace: {{ $.ctx.Release.Namespace | quote }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: {{ .port }} + targetPort: {{ .port }} + {{ if $.ctx.Values.memcachedExporter.enabled }} + - name: http-metrics + port: 9150 + targetPort: 9150 + {{ end }} + selector: + {{- include "mimir.selectorLabels" (dict "ctx" $.ctx "component" $.component) | nindent 4 }} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index 2b8c06845a..c45e348221 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -118,27 +118,27 @@ mimir: blocks_storage: backend: s3 bucket_store: - {{- if .Values.memcached.enabled }} + {{- if index .Values "memcached-chunks" "enabled" }} chunks_cache: backend: memcached memcached: - addresses: dns+{{ .Release.Name }}-memcached.{{ .Release.Namespace }}.svc:11211 - max_item_size: {{ .Values.memcached.maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . }}-memcached-chunks.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-chunks").port }} + max_item_size: {{ mul (index .Values "memcached-chunks").maxItemMemory 1024 1024 }} timeout: 450ms {{- end }} - {{- if index .Values "memcached-queries" "enabled" }} + {{- if index .Values "memcached-index-queries" "enabled" }} index_cache: backend: memcached memcached: - addresses: dns+{{ .Release.Name }}-memcached-queries.{{ .Release.Namespace }}.svc:11211 - max_item_size: {{ (index .Values "memcached-queries").maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . }}-memcached-index-queries.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-index-queries").port }} + max_item_size: {{ mul (index .Values "memcached-index-queries").maxItemMemory 1024 1024 }} {{- end }} {{- if index .Values "memcached-metadata" "enabled" }} metadata_cache: backend: memcached memcached: - addresses: dns+{{ .Release.Name }}-memcached-metadata.{{ .Release.Namespace }}.svc:11211 - max_item_size: {{ (index .Values "memcached-metadata").maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . }}-memcached-metadata.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-metadata").port }} + max_item_size: {{ mul (index .Values "memcached-metadata").maxItemMemory 1024 1024 }} {{- end }} sync_dir: /data/tsdb-sync {{- if .Values.minio.enabled }} @@ -166,8 +166,8 @@ mimir: results_cache: backend: memcached memcached: - addresses: dns+{{ .Release.Name }}-memcached-results.{{ .Release.Namespace }}.svc:11211 - max_item_size: {{ (index .Values "memcached-results").maxItemMemory }} + addresses: dns+{{ template "mimir.fullname" . }}-memcached-results.{{ .Release.Namespace }}.svc:{{ (index .Values "memcached-results").port }} + max_item_size: {{ mul (index .Values "memcached-results").maxItemMemory 1024 1024 }} cache_results: true {{- end }} @@ -1044,132 +1044,273 @@ compactor: extraEnvFrom: [] memcached: - enabled: false - architecture: high-availability - arguments: - - -m 8192 - - -o - - modern - - -v - - -I 1m - - -c 4096 image: + # -- Memcached Docker image repository repository: memcached - tag: 1.6.9 - # maxItemMemory is in bytes. Should match memcached -I flag (which is in MB) - # It is a string to avoid https://github.com/helm/helm/issues/1707. 
- maxItemMemory: '1048576' # (* 1 (* 1024 1024)) - metrics: - enabled: true - image: - registry: quay.io - repository: prometheus/memcached-exporter - tag: v0.9.0 - replicaCount: 1 - resources: - limits: - # memory limits should match requests - memory: 9830Mi - requests: - cpu: 500m - # memory requests should be exceed memcached -m flag - memory: 9830Mi # (floor (* 1.2 8192)) + # -- Memcached Docker image tag + tag: 1.6.9-alpine + # -- Memcached Docker image pull policy + pullPolicy: IfNotPresent + + # -- The SecurityContext for memcached pods + podSecurityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + + # -- The SecurityContext for memcached containers + containerSecurityContext: + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + allowPrivilegeEscalation: false + +memcachedExporter: + # -- Whether memcached metrics should be exported + enabled: true -memcached-queries: - enabled: false - architecture: high-availability - arguments: - - -m 2048 - - -o - - modern - - -v - - -I 15m - - -c 1024 image: - repository: memcached - tag: 1.6.9 - # maxItemMemory is in bytes. Should match memcached -I flag (which is in MB) - # It is a string to avoid https://github.com/helm/helm/issues/1707. - maxItemMemory: '15728640' # (* 15 (* 1024 1024)) - metrics: - enabled: true - image: - registry: quay.io - repository: prometheus/memcached-exporter - tag: v0.9.0 - replicaCount: 1 - resources: - limits: - # memory limits should match requests - memory: 2457Mi - requests: - cpu: 500m - # memory requests should be exceed memcached -m flag - memory: 2457Mi # (floor (* 1.2 2048)) + repository: prom/memcached-exporter + tag: v0.6.0 + pullPolicy: IfNotPresent + +memcached-chunks: + # -- Specifies whether memcached-chunks should be enabled + enabled: false + + # -- Total number of memcached-chunks replicas + replicas: 1 + + # -- Port of the memcached-chunks service + port: 11211 + + # -- Amount of memory allocated to memcached-chunks for object storage (in MB). + allocatedMemory: 8192 + + # -- Maximum item memory for memcached-chunks (in MB). + maxItemMemory: 1 + + # -- Extra init containers for memcached-chunks pods + initContainers: [] + + # -- Annotations for the memcached-chunks pods + annotations: {} + # -- Node selector for memcached-chunks pods + nodeSelector: {} + # -- Affinity for memcached-chunks pods + affinity: {} + # -- Tolerations for memcached-chunks pods + tolerations: [] + # -- Pod Disruption Budget + podDisruptionBudget: {} + # -- The name of the PriorityClass for memcached-chunks pods + priorityClassName: null + # -- Labels for memcached-chunks pods + podLabels: {} + # -- Annotations for memcached-chunks pods + podAnnotations: {} + # -- Management policy for memcached-chunks pods + podManagementPolicy: Parallel + # -- Grace period to allow the memcached-chunks to shutdown before it is killed + terminationGracePeriodSeconds: 60 + + # -- Stateful memcached-chunks strategy + statefulStrategy: + type: RollingUpdate + + # -- Additional CLI args for memcached-chunks + extraArgs: [] + + # -- Additional containers to be added to the memcached-chunks pod. + extraContainers: [] + + # -- Resource requests and limits for the memcached-results + # By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)). 
+ resources: null + + # -- Service annotations and labels + service: + annotations: {} + labels: {} + +memcached-index-queries: + # -- Specifies whether memcached-index-queries should be enabled + enabled: false + + # -- Total number of memcached-index-queries replicas + replicas: 1 + + # -- Port of the memcached-index-queries service + port: 11211 + + # -- Amount of memory allocated to memcached-index-queries for object storage (in MB). + allocatedMemory: 2048 + + # -- Maximum item memcached-index-queries for memcached (in MB). + maxItemMemory: 15 + + # -- Extra init containers for memcached-index-queries pods + initContainers: [] + + # -- Annotations for the memcached-index-queries pods + annotations: {} + # -- Node selector for memcached-index-queries pods + nodeSelector: {} + # -- Affinity for memcached-index-queries pods + affinity: {} + # -- Tolerations for memcached-index-queries pods + tolerations: [] + # -- Pod Disruption Budget + podDisruptionBudget: {} + # -- The name of the PriorityClass for memcached-index-queries pods + priorityClassName: null + # -- Labels for memcached-index-queries pods + podLabels: {} + # -- Annotations for memcached-index-queries pods + podAnnotations: {} + # -- Management policy for memcached-index-queries pods + podManagementPolicy: Parallel + # -- Grace period to allow the memcached-index-queries to shutdown before it is killed + terminationGracePeriodSeconds: 60 + + # -- Stateful memcached-index-queries strategy + statefulStrategy: + type: RollingUpdate + + # -- Additional CLI args for memcached-index-queries + extraArgs: [] + + # -- Additional containers to be added to the memcached-index-queries pod. + extraContainers: [] + + # -- Resource requests and limits for the memcached-results + # By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)). + resources: null + + # -- Service annotations and labels + service: + annotations: {} + labels: {} memcached-metadata: + # -- Specifies whether memcached-metadata should be enabled enabled: false - architecture: high-availability - arguments: - - -m 512 - - -o - - modern - - -v - - -I 1m - - -c 1024 - image: - repository: memcached - tag: 1.6.9 - # maxItemMemory is in bytes. Should match memcached -I flag (which is in MB) - # It is a string to avoid https://github.com/helm/helm/issues/1707. - maxItemMemory: '1048576' # (* 1 (* 1024 1024)) - metrics: - enabled: true - image: - registry: quay.io - repository: prometheus/memcached-exporter - tag: v0.9.0 - replicaCount: 1 - resources: - limits: - # memory limits should match requests - memory: 614Mi - requests: - cpu: 500m - # memory requests should be exceed memcached -m flag - memory: 614Mi # (floor (* 1.2 512)) + + # -- Total number of memcached-metadata replicas + replicas: 1 + + # -- Port of the memcached-metadata service + port: 11211 + + # -- Amount of memory allocated to memcached-metadata for object storage (in MB). + allocatedMemory: 512 + + # -- Maximum item memcached-metadata for memcached (in MB). 
+ maxItemMemory: 1 + + # -- Extra init containers for memcached-metadata pods + initContainers: [] + + # -- Annotations for the memcached-metadata pods + annotations: {} + # -- Node selector for memcached-metadata pods + nodeSelector: {} + # -- Affinity for memcached-metadata pods + affinity: {} + # -- Tolerations for memcached-metadata pods + tolerations: [] + # -- Pod Disruption Budget + podDisruptionBudget: {} + # -- The name of the PriorityClass for memcached-metadata pods + priorityClassName: null + # -- Labels for memcached-metadata pods + podLabels: {} + # -- Annotations for memcached-metadata pods + podAnnotations: {} + # -- Management policy for memcached-metadata pods + podManagementPolicy: Parallel + # -- Grace period to allow the memcached-metadata to shutdown before it is killed + terminationGracePeriodSeconds: 60 + + # -- Stateful memcached-metadata strategy + statefulStrategy: + type: RollingUpdate + + # -- Additional CLI args for memcached-metadata + extraArgs: [] + + # -- Additional containers to be added to the memcached-metadata pod. + extraContainers: [] + + # -- Resource requests and limits for the memcached-results + # By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)). + resources: null + + # -- Service annotations and labels + service: + annotations: {} + labels: {} memcached-results: + # -- Specifies whether memcached-results should be enabled enabled: false - architecture: high-availability - arguments: - - -m 512 - - -o - - modern - - -v - - -I 1m - - -c 1024 - image: - repository: memcached - tag: 1.6.9 - # maxItemMemory is in bytes. Should match memcached -I flag (which is in MB) - # It is a string to avoid https://github.com/helm/helm/issues/1707. - maxItemMemory: '1048576' # (* 1 (* 1024 1024)) - metrics: - enabled: true - image: - registry: quay.io - repository: prometheus/memcached-exporter - tag: v0.9.0 - replicaCount: 1 - resources: - limits: - # memory limits should match requests - memory: 614Mi - requests: - cpu: 500m - # memory requests should be exceed memcached -m flag - memory: 614Mi # (floor (* 1.2 512)) + + # -- Total number of memcached-results replicas + replicas: 1 + + # -- Port of the memcached-results service + port: 11211 + + # -- Amount of memory allocated to memcached-results for object storage (in MB). + allocatedMemory: 512 + + # -- Maximum item memcached-results for memcached (in MB). + maxItemMemory: 1 + + # -- Extra init containers for memcached-results pods + initContainers: [] + + # -- Annotations for the memcached-results pods + annotations: {} + # -- Node selector for memcached-results pods + nodeSelector: {} + # -- Affinity for memcached-results pods + affinity: {} + # -- Tolerations for memcached-results pods + tolerations: [] + # -- Pod Disruption Budget + podDisruptionBudget: {} + # -- The name of the PriorityClass for memcached-results pods + priorityClassName: null + # -- Labels for memcached-results pods + podLabels: {} + # -- Annotations for memcached-results pods + podAnnotations: {} + # -- Management policy for memcached-results pods + podManagementPolicy: Parallel + # -- Grace period to allow the memcached-results to shutdown before it is killed + terminationGracePeriodSeconds: 60 + + # -- Stateful memcached-results strategy + statefulStrategy: + type: RollingUpdate + + # -- Additional CLI args for memcached-results + extraArgs: [] + + # -- Additional containers to be added to the memcached-results pod. 
+ extraContainers: [] + + # -- Resource requests and limits for the memcached-results + # By default a safe memory limit will be requested based on allocatedMemory value (floor (* 1.2 allocatedMemory)). + resources: null + + # -- Service annotations and labels + service: + annotations: {} + labels: {} minio: enabled: true diff --git a/operations/helm/ct.yaml b/operations/helm/ct.yaml index ddbf04d583..a8403230a2 100644 --- a/operations/helm/ct.yaml +++ b/operations/helm/ct.yaml @@ -4,8 +4,6 @@ target-branch: main chart-dirs: - operations/helm/charts chart-repos: - - bitnami=https://charts.bitnami.com/bitnami - - bitnami-pre-2022=https://raw.githubusercontent.com/bitnami/charts/eb5f9a9513d987b519f0ecd732e7031241c50328/bitnami - minio=https://helm.min.io helm-extra-args: --timeout 600s validate-maintainers: false diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml new file mode 100644 index 0000000000..115014de66 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml @@ -0,0 +1,91 @@ +--- +# Source: mimir-distributed/templates/memcached-chunks/memcached-chunks-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-values-mimir-memcached-chunks + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-chunks + updateStrategy: + type: RollingUpdate + serviceName: test-oss-values-mimir-memcached-chunks + + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: memcached-chunks + annotations: + minio-secret-version: "42" + + spec: + serviceAccountName: test-oss-values-mimir + securityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + initContainers: + [] + nodeSelector: + {} + affinity: + {} + tolerations: + [] + terminationGracePeriodSeconds: 60 + containers: + - name: memcached + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 10m + ports: + - containerPort: 11211 + name: client + args: + - -m 10 + - -o + - modern + - -I 1m + - -c 16384 + - -v + - -u 11211 + env: + envFrom: + - secretRef: + name: mimir-minio-secret + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + - name: exporter + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:11211" + - "--web.listen-address=0.0.0.0:9150" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml new file mode 
100644 index 0000000000..4e568287d9 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml @@ -0,0 +1,31 @@ +--- +# Source: mimir-distributed/templates/memcached-chunks/memcached-chunks-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-values-mimir-memcached-chunks + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-chunks + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + + - name: http-metrics + port: 9150 + targetPort: 9150 + + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-chunks diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml new file mode 100644 index 0000000000..7b40f068f2 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml @@ -0,0 +1,91 @@ +--- +# Source: mimir-distributed/templates/memcached-index-queries/memcached-index-queries-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-values-mimir-memcached-index-queries + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-index-queries + updateStrategy: + type: RollingUpdate + serviceName: test-oss-values-mimir-memcached-index-queries + + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: memcached-index-queries + annotations: + minio-secret-version: "42" + + spec: + serviceAccountName: test-oss-values-mimir + securityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + initContainers: + [] + nodeSelector: + {} + affinity: + {} + tolerations: + [] + terminationGracePeriodSeconds: 60 + containers: + - name: memcached + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 10m + ports: + - containerPort: 11211 + name: client + args: + - -m 30 + - -o + - modern + - -I 15m + - -c 16384 + - -v + - -u 11211 + env: + envFrom: + - secretRef: + name: mimir-minio-secret + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + - name: exporter + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:11211" + - "--web.listen-address=0.0.0.0:9150" diff --git 
a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml new file mode 100644 index 0000000000..54bdf01a53 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml @@ -0,0 +1,31 @@ +--- +# Source: mimir-distributed/templates/memcached-index-queries/memcached-index-queries-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-values-mimir-memcached-index-queries + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-index-queries + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + + - name: http-metrics + port: 9150 + targetPort: 9150 + + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-index-queries diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml new file mode 100644 index 0000000000..ca5ceed27e --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml @@ -0,0 +1,91 @@ +--- +# Source: mimir-distributed/templates/memcached-metadata/memcached-metadata-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-values-mimir-memcached-metadata + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-metadata + updateStrategy: + type: RollingUpdate + serviceName: test-oss-values-mimir-memcached-metadata + + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: memcached-metadata + annotations: + minio-secret-version: "42" + + spec: + serviceAccountName: test-oss-values-mimir + securityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + initContainers: + [] + nodeSelector: + {} + affinity: + {} + tolerations: + [] + terminationGracePeriodSeconds: 60 + containers: + - name: memcached + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 10m + ports: + - containerPort: 11211 + name: client + args: + - -m 10 + - -o + - modern + - -I 1m + - -c 16384 + - -v + - -u 11211 + env: + envFrom: + - secretRef: + name: mimir-minio-secret + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + - name: exporter + 
image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:11211" + - "--web.listen-address=0.0.0.0:9150" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml new file mode 100644 index 0000000000..764614c479 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml @@ -0,0 +1,31 @@ +--- +# Source: mimir-distributed/templates/memcached-metadata/memcached-metadata-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-values-mimir-memcached-metadata + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-metadata + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + + - name: http-metrics + port: 9150 + targetPort: 9150 + + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-metadata diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml new file mode 100644 index 0000000000..c7c61e5431 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml @@ -0,0 +1,91 @@ +--- +# Source: mimir-distributed/templates/memcached-results/memcached-results-statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: test-oss-values-mimir-memcached-results + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + podManagementPolicy: Parallel + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-results + updateStrategy: + type: RollingUpdate + serviceName: test-oss-values-mimir-memcached-results + + template: + metadata: + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: memcached-results + annotations: + minio-secret-version: "42" + + spec: + serviceAccountName: test-oss-values-mimir + securityContext: + fsGroup: 11211 + runAsGroup: 11211 + runAsNonRoot: true + runAsUser: 11211 + initContainers: + [] + nodeSelector: + {} + affinity: + {} + tolerations: + [] + terminationGracePeriodSeconds: 60 + containers: + - name: memcached + image: memcached:1.6.9-alpine + imagePullPolicy: IfNotPresent + resources: + requests: + cpu: 10m + ports: + - containerPort: 11211 + name: client + args: + - -m 10 + - -o + - modern + - -I 1m + - -c 16384 + - -v + - -u 11211 + env: + envFrom: + - secretRef: + 
name: mimir-minio-secret + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + - name: exporter + image: prom/memcached-exporter:v0.6.0 + imagePullPolicy: IfNotPresent + ports: + - containerPort: 9150 + name: http-metrics + args: + - "--memcached.address=localhost:11211" + - "--web.listen-address=0.0.0.0:9150" diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml new file mode 100644 index 0000000000..10063b0b11 --- /dev/null +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml @@ -0,0 +1,31 @@ +--- +# Source: mimir-distributed/templates/memcached-results/memcached-results-svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: test-oss-values-mimir-memcached-results + labels: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-results + app.kubernetes.io/version: "2.1.0" + app.kubernetes.io/managed-by: Helm + annotations: + {} + namespace: "citestns" +spec: + type: ClusterIP + clusterIP: None + ports: + - name: memcached-client + port: 11211 + targetPort: 11211 + + - name: http-metrics + port: 9150 + targetPort: 9150 + + selector: + app.kubernetes.io/name: mimir + app.kubernetes.io/instance: test-oss-values + app.kubernetes.io/component: memcached-results diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml index 6df56c5260..dc626f0439 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/mimir-config.yaml @@ -30,6 +30,22 @@ data: blocks_storage: backend: s3 bucket_store: + chunks_cache: + backend: memcached + memcached: + addresses: dns+test-oss-values-mimir-memcached-chunks.citestns.svc:11211 + max_item_size: 1048576 + timeout: 450ms + index_cache: + backend: memcached + memcached: + addresses: dns+test-oss-values-mimir-memcached-index-queries.citestns.svc:11211 + max_item_size: 15728640 + metadata_cache: + backend: memcached + memcached: + addresses: dns+test-oss-values-mimir-memcached-metadata.citestns.svc:11211 + max_item_size: 1048576 sync_dir: /data/tsdb-sync s3: access_key_id: ${MINIO_ACCESS_KEY_ID} @@ -43,7 +59,13 @@ data: data_dir: /data frontend: align_queries_with_step: true + cache_results: true log_queries_longer_than: 10s + results_cache: + backend: memcached + memcached: + addresses: dns+test-oss-values-mimir-memcached-results.citestns.svc:11211 + max_item_size: 1048576 frontend_worker: frontend_address: test-oss-values-mimir-query-frontend-headless.citestns.svc:9095 ingester: From b45d2c11cb4b4325f14c902b039c75467b378644 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Thu, 23 Jun 2022 11:57:24 +0200 Subject: [PATCH 32/63] Helm: add NOTES.txt (#2189) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Helm: add NOTES.txt Add a NOTES.txt that prints the endpoints that will be usable by the user for interacting with mimir/gem. Currently helm does not support rendering the NOTES for testing. 
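As a manual check (not part of this change), a dry-run install still prints the
rendered notes; the release name and namespace below are placeholders:

    # Renders the chart without installing it; the NOTES output is printed
    # at the end of the dry-run summary.
    helm install mimir-preview ./operations/helm/charts/mimir-distributed \
      --namespace mimir-preview --dry-run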
Rage/vote here: https://github.com/helm/helm/issues/6901 Signed-off-by: György Krajcsovits --- .../charts/mimir-distributed/CHANGELOG.md | 1 + .../mimir-distributed/templates/NOTES.txt | 33 +++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 operations/helm/charts/mimir-distributed/templates/NOTES.txt diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index bdcdf8c70e..7e9be5b5fe 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -41,6 +41,7 @@ Entries should include a reference to the Pull Request that introduced the chang * [ENHANCEMENT] Enable `-config.expand-env=true` option in all Mimir services to be able to take secrets/settings from the environment and inject them into the Mimir configuration file. #2017 * [ENHANCEMENT] Add a simple test for enterprise installation #2027 * [ENHANCEMENT] Check for the containerSecurityContext in values file. #2112 +* [ENHANCEMENT] Add `NOTES.txt` to show endpoints URLs for the user at install/upgrade. #2189 ## 2.1.0-beta.7 diff --git a/operations/helm/charts/mimir-distributed/templates/NOTES.txt b/operations/helm/charts/mimir-distributed/templates/NOTES.txt new file mode 100644 index 0000000000..8d047948ac --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/NOTES.txt @@ -0,0 +1,33 @@ +{{- $gateway := .Values.nginx }} +{{- if .Values.enterprise.enabled }} +{{- $gateway = .Values.gateway }} +Welcome to Grafana Enterprise Metrics! +{{- else }} +Welcome to Grafana Mimir! +{{- end }} + +Remote write endpoints for Prometheus or Grafana Agent: +{{ with $gateway.ingress -}} +{{- if .enabled -}} +From outside the cluster via ingress: +{{ range .hosts }} http{{ if .tls }}s{{ end }}://{{ .host }}/api/v1/push +{{ end }} +{{- else -}} +Ingress is not enabled, see {{ if $.Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.ingress values. +{{- end -}} +{{- end }} +From inside the cluster: + http://{{ include "mimir.fullname" $ }}-{{ if .Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.{{ .Release.Namespace }}.svc:{{ $gateway.service.port | default "80" }}/api/v1/push + +Read address, Grafana data source (Prometheus) URL: +{{ with $gateway.ingress -}} +{{- if .enabled -}} +From outside the cluster via ingress: +{{ range .hosts }} http{{ if .tls }}s{{ end }}://{{ .host }}{{ template "mimir.prometheusHttpPrefix" $ }} +{{ end }} +{{- else -}} +Ingress is not enabled, see {{ if $.Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.ingress values. 
+{{- end -}}
+{{- end }}
+From inside the cluster:
+  http://{{ include "mimir.fullname" $ }}-{{ if .Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.{{ .Release.Namespace }}.svc:{{ $gateway.service.port | default "80" }}{{ template "mimir.prometheusHttpPrefix" $ }}
\ No newline at end of file

From b1760cf4257e2e9e6a7f57ddd4275d3eda94e800 Mon Sep 17 00:00:00 2001
From: George Krajcsovits
Date: Thu, 23 Jun 2022 15:01:31 +0200
Subject: [PATCH 33/63] Helm: improve docstring for nameOverride and
 fullnameOverride (#2198)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Helm: explain nameOverride and fullnameOverride and warn about dashboards

* Helm: Fix missing "the" in NOTES.txt

Signed-off-by: György Krajcsovits
---
 .../helm/charts/mimir-distributed/templates/NOTES.txt | 4 ++--
 operations/helm/charts/mimir-distributed/values.yaml  | 7 +++++--
 2 files changed, 7 insertions(+), 4 deletions(-)

diff --git a/operations/helm/charts/mimir-distributed/templates/NOTES.txt b/operations/helm/charts/mimir-distributed/templates/NOTES.txt
index 8d047948ac..24dde8e139 100644
--- a/operations/helm/charts/mimir-distributed/templates/NOTES.txt
+++ b/operations/helm/charts/mimir-distributed/templates/NOTES.txt
@@ -13,7 +13,7 @@ From outside the cluster via ingress:
 {{ range .hosts }} http{{ if .tls }}s{{ end }}://{{ .host }}/api/v1/push
 {{ end }}
 {{- else -}}
-Ingress is not enabled, see {{ if $.Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.ingress values.
+Ingress is not enabled, see the {{ if $.Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.ingress values.
 {{- end -}}
 {{- end }}
 From inside the cluster:
@@ -26,7 +26,7 @@ From outside the cluster via ingress:
 {{ range .hosts }} http{{ if .tls }}s{{ end }}://{{ .host }}{{ template "mimir.prometheusHttpPrefix" $ }}
 {{ end }}
 {{- else -}}
-Ingress is not enabled, see {{ if $.Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.ingress values.
+Ingress is not enabled, see the {{ if $.Values.enterprise.enabled }}gateway{{ else }}nginx{{ end }}.ingress values.
 {{- end -}}
 {{- end }}
 From inside the cluster:
diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml
index c45e348221..25f1e37e1a 100644
--- a/operations/helm/charts/mimir-distributed/values.yaml
+++ b/operations/helm/charts/mimir-distributed/values.yaml
@@ -6,10 +6,13 @@
 # To configure the resources for production load, refer to the the small.yaml or
 # large.yaml values files.

-# -- Overrides the chart's name
+# -- Overrides the chart's name. Used to change mimir/enterprise-metrics infix in the resource names. E.g. myRelease-mimir-ingester-1 to myRelease-nameOverride-ingester-1.
+# This option is used to align resource names with Cortex, when doing a migration from Cortex to Grafana Mimir.
+# Note: Grafana provided dashboards rely on the default naming and will need changes.
 nameOverride: null

-# -- Overrides the chart's computed fullname
+# -- Overrides the chart's computed fullname. Used to change the full prefix of resource names. E.g. myRelease-mimir-ingester-1 to fullnameOverride-ingester-1.
+# Note: Grafana provided dashboards rely on the default naming and will need changes.
 fullnameOverride: null

 # Container image settings.

From 3fe1f4262cca31c0e7910097d574e643bb3f67c6 Mon Sep 17 00:00:00 2001
From: Steve Simpson
Date: Thu, 23 Jun 2022 16:02:39 +0200
Subject: [PATCH 34/63] Mixin: Make each `MimirCompactorHasNotSuccessfullyRunCompaction`
 alert distinct.
 (#2197)

When this alert is evaluated in Mimir itself, it can cause false-positive
rule evaluation failures. This is because multiple alerts with the same
name can fire concurrently, causing duplicate series to be written.
---
 CHANGELOG.md                                      | 1 +
 operations/mimir-mixin-compiled/alerts.yaml       | 3 +++
 operations/mimir-mixin/alerts/compactor.libsonnet | 3 +++
 3 files changed, 7 insertions(+)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index ac38fcba1a..1b06d59468 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -67,6 +67,7 @@
 * [BUGFIX] Do not trigger `MimirAllocatingTooMuchMemory` alerts if no container limits are supplied. #1905
 * [BUGFIX] Dashboards: Remove empty "Chunks per query" panel from `Mimir / Queries` dashboard. #1928
 * [BUGFIX] Dashboards: Use Grafana's `$__rate_interval` for rate queries in dashboards to support scrape intervals of >15s. #2011
+* [BUGFIX] Alerts: Make each version of `MimirCompactorHasNotSuccessfullyRunCompaction` distinct to avoid rule evaluation failures due to duplicate series being generated. #2197

 ### Jsonnet

diff --git a/operations/mimir-mixin-compiled/alerts.yaml b/operations/mimir-mixin-compiled/alerts.yaml
index 200c07b9e3..2c9b29b8f6 100644
--- a/operations/mimir-mixin-compiled/alerts.yaml
+++ b/operations/mimir-mixin-compiled/alerts.yaml
@@ -655,6 +655,7 @@ groups:
       (cortex_compactor_last_successful_run_timestamp_seconds > 0)
     for: 1h
     labels:
+      reason: in-last-24h
       severity: critical
   - alert: MimirCompactorHasNotSuccessfullyRunCompaction
     annotations:
@@ -664,6 +665,7 @@
       cortex_compactor_last_successful_run_timestamp_seconds == 0
     for: 24h
     labels:
+      reason: since-startup
       severity: critical
   - alert: MimirCompactorHasNotSuccessfullyRunCompaction
     annotations:
@@ -672,6 +674,7 @@
     expr: |
       increase(cortex_compactor_runs_failed_total[2h]) >= 2
     labels:
+      reason: consecutive-failures
       severity: critical
   - alert: MimirCompactorHasNotUploadedBlocks
     annotations:
diff --git a/operations/mimir-mixin/alerts/compactor.libsonnet b/operations/mimir-mixin/alerts/compactor.libsonnet
index 7558c7850f..6f57f747b4 100644
--- a/operations/mimir-mixin/alerts/compactor.libsonnet
+++ b/operations/mimir-mixin/alerts/compactor.libsonnet
@@ -28,6 +28,7 @@
         |||,
         labels: {
           severity: 'critical',
+          reason: 'in-last-24h',
         },
         annotations: {
           message: '%(product)s Compactor %(alert_instance_variable)s in %(alert_aggregation_variables)s has not run compaction in the last 24 hours.' % $._config,
@@ -42,6 +43,7 @@
         |||,
         labels: {
           severity: 'critical',
+          reason: 'since-startup',
        },
         annotations: {
           message: '%(product)s Compactor %(alert_instance_variable)s in %(alert_aggregation_variables)s has not run compaction in the last 24 hours.' % $._config,
@@ -55,6 +57,7 @@
         |||,
         labels: {
           severity: 'critical',
+          reason: 'consecutive-failures',
         },
         annotations: {
           message: '%(product)s Compactor %(alert_instance_variable)s in %(alert_aggregation_variables)s failed to run 2 consecutive compactions.'
% $._config, From f4a5cf453cdbd143505a9cf4c3d1bfd72cde558e Mon Sep 17 00:00:00 2001 From: Marco Pracucci Date: Fri, 24 Jun 2022 08:52:26 +0200 Subject: [PATCH 35/63] Fixed typo in memberlist admin UI (#2202) Signed-off-by: Marco Pracucci --- pkg/api/memberlist_status.gohtml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/api/memberlist_status.gohtml b/pkg/api/memberlist_status.gohtml index 1e5554677b..920db82230 100644 --- a/pkg/api/memberlist_status.gohtml +++ b/pkg/api/memberlist_status.gohtml @@ -33,7 +33,7 @@ {{ else }} {{ end }} From 1a357193f0e7af8cbb2639c54c37143242d6adae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Fri, 24 Jun 2022 09:47:02 +0200 Subject: [PATCH 36/63] Include link to the talk itself. (#2215) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- docs/configurations/grafanacon-2022/index.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/docs/configurations/grafanacon-2022/index.md b/docs/configurations/grafanacon-2022/index.md index c4dccf005d..7f92b9f4d5 100644 --- a/docs/configurations/grafanacon-2022/index.md +++ b/docs/configurations/grafanacon-2022/index.md @@ -12,6 +12,10 @@ associated_technologies: - mimir --- +# GrafanaCon 2022 Mimir session + +Configuration files in this directory were used during the demo part of [GrafanaCon 2022 talk about Mimir](https://grafana.com/go/grafanaconline/2022/grafana-mimir-migrate-your-metrics-in-minutes/). + # Before you start **Warning:** Following commands will not specify explicit context for `kubectl` commands. Make sure to select correct From 5a78ece61062f2e47f174aa37cb53dabc29164c1 Mon Sep 17 00:00:00 2001 From: zenador Date: Fri, 24 Jun 2022 17:20:44 +0800 Subject: [PATCH 37/63] Add store gateway consistency check errors to errors catalog (#2150) * Add store gateway consistency check errors to errors catalogue * Apply suggestions from code review Co-authored-by: Ursula Kallio * Manually apply code review changes * Apply suggestions from code review Co-authored-by: Marco Pracucci Co-authored-by: Ursula Kallio * Update based on changes from code review * Apply suggestions from code review Co-authored-by: Marco Pracucci Co-authored-by: Ursula Kallio Co-authored-by: Marco Pracucci --- CHANGELOG.md | 2 +- cmd/mimir/config-descriptor.json | 2 +- cmd/mimir/help-all.txt.tmpl | 2 +- .../index.md | 4 +-- .../operators-guide/mimir-runbooks/_index.md | 34 +++++++++++++++++++ pkg/ingester/ingester_test.go | 2 +- pkg/querier/blocks_finder_bucket_index.go | 9 +++-- .../blocks_finder_bucket_index_test.go | 25 ++++++++++++-- pkg/querier/blocks_store_queryable.go | 6 +++- pkg/querier/blocks_store_queryable_test.go | 26 +++++++++++--- pkg/storage/tsdb/config.go | 2 +- pkg/util/globalerror/errors.go | 3 ++ pkg/util/globalerror/errors_test.go | 16 ++++----- 13 files changed, 108 insertions(+), 25 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1b06d59468..54fc3833f2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -24,7 +24,7 @@ * The following metric is exposed to tell how many requests have been rejected: * `cortex_discarded_requests_total` * [ENHANCEMENT] Store-gateway: Add the experimental ability to run requests in a dedicated OS thread pool. This feature can be configured using `-store-gateway.thread-pool-size` and is disabled by default. Replaces the ability to run index header operations in a dedicated thread pool. 
#1660 #1812 -* [ENHANCEMENT] Improved error messages to make them easier to understand; each now have a unique, global identifier that you can use to look up in the runbooks for more information. #1907 #1919 #1888 #1939 #1984 #2009 #2066 #2104 +* [ENHANCEMENT] Improved error messages to make them easier to understand; each now have a unique, global identifier that you can use to look up in the runbooks for more information. #1907 #1919 #1888 #1939 #1984 #2009 #2066 #2104 #2150 * [ENHANCEMENT] Memberlist KV: incoming messages are now processed on per-key goroutine. This may reduce loss of "maintanance" packets in busy memberlist installations, but use more CPU. New `memberlist_client_received_broadcasts_dropped_total` counter tracks number of dropped per-key messages. #1912 * [ENHANCEMENT] Blocks Storage, Alertmanager, Ruler: add support a prefix to the bucket store (`*_storage.storage_prefix`). This enables using the same bucket for the three components. #1686 #1951 * [ENHANCEMENT] Upgrade Docker base images to `alpine:3.16.0`. #2028 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 4079f6173d..080e1f8a75 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -5032,7 +5032,7 @@ "kind": "field", "name": "max_stale_period", "required": false, - "desc": "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, while this check is enforced in the querier (at query time).", + "desc": "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, and this check is enforced in the querier (at query time).", "fieldValue": null, "fieldDefaultValue": 3600000000000, "fieldFlag": "blocks-storage.bucket-store.bucket-index.max-stale-period", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 425bbc1833..6066f4a2dd 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -269,7 +269,7 @@ Usage of ./cmd/mimir/mimir: -blocks-storage.bucket-store.bucket-index.idle-timeout duration How long a unused bucket index should be cached. Once this timeout expires, the unused bucket index is removed from the in-memory cache. This option is used only by querier. (default 1h0m0s) -blocks-storage.bucket-store.bucket-index.max-stale-period duration - The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, while this check is enforced in the querier (at query time). (default 1h0m0s) + The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, and this check is enforced in the querier (at query time). (default 1h0m0s) -blocks-storage.bucket-store.bucket-index.update-on-error-interval duration How frequently a bucket index, which previously failed to load, should be tried to load again. This option is used only by querier. 
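For reference, the staleness check described above is driven by the flag shown in the help text; a minimal sketch of overriding it on a querier (the 2h value and the single-binary style invocation are illustrative assumptions, not part of this change):

    # Allow the bucket index to be up to 2h old before queries start failing.
    mimir -target=querier \
      -blocks-storage.bucket-store.bucket-index.max-stale-period=2h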
(default 1m0s) -blocks-storage.bucket-store.chunk-pool-max-bucket-size-bytes int diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index f973af1977..920837a2c0 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -3351,8 +3351,8 @@ bucket_store: # (advanced) The maximum allowed age of a bucket index (last updated) before # queries start failing because the bucket index is too old. The bucket - # index is periodically updated by the compactor, while this check is - # enforced in the querier (at query time). + # index is periodically updated by the compactor, and this check is enforced + # in the querier (at query time). # CLI flag: -blocks-storage.bucket-store.bucket-index.max-stale-period [max_stale_period: | default = 1h] diff --git a/docs/sources/operators-guide/mimir-runbooks/_index.md b/docs/sources/operators-guide/mimir-runbooks/_index.md index 8adc187d79..dbd5e23a69 100644 --- a/docs/sources/operators-guide/mimir-runbooks/_index.md +++ b/docs/sources/operators-guide/mimir-runbooks/_index.md @@ -1429,6 +1429,40 @@ How it **works**: - The series must already exist before exemplars can be appended, as we do not create new series upon ingesting exemplars. The series will be created when a sample from it is ingested. +### err-mimir-store-consistency-check-failed + +This error occurs when the querier is unable to fetch some of the expected blocks after multiple retries and connections to different store-gateways. The query fails because some blocks are missing in the queried store-gateways. + +How it **works**: + +- Mimir has been designed to guarantee query results correctness and never return partial query results. Either a query succeeds returning fully consistent results or it fails. +- Queriers, and rulers running with the "internal" evaluation mode, run a consistency check to ensure all expected blocks have been queried from the long-term storage via the store-gateways. +- If any expected block has not been queried via the store-gateways, then the query fails with this error. +- See [Anatomy of a query request]({{< relref "../architecture/components/querier.md#anatomy-of-a-query-request" >}}) to learn more. + +How to **fix** it: + +- Ensure all store-gateways are healthy. +- Ensure all store-gateways are successfully synching owned blocks (see [`MimirStoreGatewayHasNotSyncTheBucket`](#MimirStoreGatewayHasNotSyncTheBucket)). + +### err-mimir-bucket-index-too-old + +This error occurs when a query fails because the bucket index is too old. + +How it **works**: + +- Compactors periodically write a per-tenant file, called the "bucket index", to the object storage. The bucket index contains all known blocks for the given tenant and is updated every `-compactor.cleanup-interval`. +- When a query is executed, queriers and rulers running with the "internal" evaluation mode look up the bucket index to find which blocks should be queried through the store-gateways. +- To ensure all required blocks are queried, queriers and rulers determine how old a bucket index is based on the time that it was last updated by the compactor. +- If the age is older than the maximum stale period that is configured via `-blocks-storage.bucket-store.bucket-index.max-stale-period`, the query fails. 
+- This circuit breaker ensures that the queriers and rulers do not return any partial query results due to a stale view of the long-term storage. + +How to **fix** it: + +- Ensure the compactor is running successfully (e.g. not crashing, not going out of memory). +- Ensure each compactor replica has successfully updated bucket index of each owned tenant within the double of `-compactor.cleanup-interval` (query below assumes the cleanup interval is set to 15 minutes): + `time() - cortex_compactor_block_cleanup_last_successful_run_timestamp_seconds > 2 * (15 * 60)` + ## Mimir routes by path **Write path**: diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index eb4d655e12..6d241c534d 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -5785,7 +5785,7 @@ func TestNewIngestErrMsgs(t *testing.T) { for testName, tc := range tests { t.Run(testName, func(t *testing.T) { - assert.Equal(t, tc.err.Error(), tc.msg) + assert.Equal(t, tc.msg, tc.err.Error()) }) } } diff --git a/pkg/querier/blocks_finder_bucket_index.go b/pkg/querier/blocks_finder_bucket_index.go index e95174278f..7950acab10 100644 --- a/pkg/querier/blocks_finder_bucket_index.go +++ b/pkg/querier/blocks_finder_bucket_index.go @@ -7,6 +7,7 @@ package querier import ( "context" + "fmt" "time" "github.com/go-kit/log" @@ -18,11 +19,11 @@ import ( "github.com/grafana/mimir/pkg/storage/bucket" "github.com/grafana/mimir/pkg/storage/tsdb/bucketindex" + "github.com/grafana/mimir/pkg/util/globalerror" ) var ( errBucketIndexBlocksFinderNotRunning = errors.New("bucket index blocks finder is not running") - errBucketIndexTooOld = errors.New("bucket index is too old and the last time it was updated exceeds the allowed max staleness") ) type BucketIndexBlocksFinderConfig struct { @@ -72,7 +73,7 @@ func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, // Ensure the bucket index is not too old. if time.Since(idx.GetUpdatedAt()) > f.cfg.MaxStalePeriod { - return nil, nil, errBucketIndexTooOld + return nil, nil, newBucketIndexTooOldError(idx.GetUpdatedAt(), f.cfg.MaxStalePeriod) } var ( @@ -112,3 +113,7 @@ func (f *BucketIndexBlocksFinder) GetBlocks(ctx context.Context, userID string, return blocks, matchingDeletionMarks, nil } + +func newBucketIndexTooOldError(updatedAt time.Time, maxStalePeriod time.Duration) error { + return errors.New(globalerror.BucketIndexTooOld.Message(fmt.Sprintf("the bucket index is too old. 
It was last updated at %s, which exceeds the maximum allowed staleness period of %v", updatedAt.UTC().Format(time.RFC3339Nano), maxStalePeriod))) +} diff --git a/pkg/querier/blocks_finder_bucket_index_test.go b/pkg/querier/blocks_finder_bucket_index_test.go index 21d9a2f865..00091becf3 100644 --- a/pkg/querier/blocks_finder_bucket_index_test.go +++ b/pkg/querier/blocks_finder_bucket_index_test.go @@ -192,15 +192,16 @@ func TestBucketIndexBlocksFinder_GetBlocks_BucketIndexIsTooOld(t *testing.T) { bkt, _ := mimir_testutil.PrepareFilesystemBucket(t) finder := prepareBucketIndexBlocksFinder(t, bkt) - require.NoError(t, bucketindex.WriteIndex(ctx, bkt, userID, nil, &bucketindex.Index{ + idx := &bucketindex.Index{ Version: bucketindex.IndexVersion1, Blocks: bucketindex.Blocks{}, BlockDeletionMarks: bucketindex.BlockDeletionMarks{}, UpdatedAt: time.Now().Add(-2 * time.Hour).Unix(), - })) + } + require.NoError(t, bucketindex.WriteIndex(ctx, bkt, userID, nil, idx)) _, _, err := finder.GetBlocks(ctx, userID, 10, 20) - require.Equal(t, errBucketIndexTooOld, err) + require.EqualError(t, err, newBucketIndexTooOldError(idx.GetUpdatedAt(), finder.cfg.MaxStalePeriod).Error()) } func prepareBucketIndexBlocksFinder(t testing.TB, bkt objstore.Bucket) *BucketIndexBlocksFinder { @@ -224,3 +225,21 @@ func prepareBucketIndexBlocksFinder(t testing.TB, bkt objstore.Bucket) *BucketIn return finder } + +func TestBlocksFinderBucketIndexErrMsgs(t *testing.T) { + tests := map[string]struct { + err error + msg string + }{ + "newBucketIndexTooOldError": { + err: newBucketIndexTooOldError(time.Unix(1000000000, 0), time.Hour), + msg: `the bucket index is too old. It was last updated at 2001-09-09T01:46:40Z, which exceeds the maximum allowed staleness period of 1h0m0s (err-mimir-bucket-index-too-old)`, + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + assert.Equal(t, tc.msg, tc.err.Error()) + }) + } +} diff --git a/pkg/querier/blocks_store_queryable.go b/pkg/querier/blocks_store_queryable.go index e1b042635e..dffbbafa80 100644 --- a/pkg/querier/blocks_store_queryable.go +++ b/pkg/querier/blocks_store_queryable.go @@ -594,7 +594,11 @@ func (q *blocksStoreQuerier) queryWithConsistencyCheck(ctx context.Context, logg // We've not been able to query all expected blocks after all retries. level.Warn(util_log.WithContext(ctx, logger)).Log("msg", "failed consistency check", "err", err) - return fmt.Errorf("consistency check failed because some blocks were not queried: %s", strings.Join(convertULIDsToString(remainingBlocks), " ")) + return newStoreConsistencyCheckFailedError(remainingBlocks) +} + +func newStoreConsistencyCheckFailedError(remainingBlocks []ulid.ULID) error { + return fmt.Errorf("%v. The non-queried blocks are: %s", globalerror.StoreConsistencyCheckFailed.Message("the consistency check failed because some blocks were not queried"), strings.Join(convertULIDsToString(remainingBlocks), " ")) } // filterBlocksByShard removes blocks that can be safely ignored when using query sharding. 
We know that block can be safely diff --git a/pkg/querier/blocks_store_queryable_test.go b/pkg/querier/blocks_store_queryable_test.go index 57e749ca9d..d7ee1c1153 100644 --- a/pkg/querier/blocks_store_queryable_test.go +++ b/pkg/querier/blocks_store_queryable_test.go @@ -321,7 +321,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, limits: &blocksStoreLimitsMock{}, queryLimiter: noOpQueryLimiter, - expectedErr: fmt.Errorf("consistency check failed because some blocks were not queried: %s", block2.String()), + expectedErr: newStoreConsistencyCheckFailedError([]ulid.ULID{block2}), }, "multiple store-gateway instances have some missing blocks (consistency check failed)": { finderResult: bucketindex.Blocks{ @@ -347,7 +347,7 @@ func TestBlocksStoreQuerier_Select(t *testing.T) { }, limits: &blocksStoreLimitsMock{}, queryLimiter: noOpQueryLimiter, - expectedErr: fmt.Errorf("consistency check failed because some blocks were not queried: %s %s", block3.String(), block4.String()), + expectedErr: newStoreConsistencyCheckFailedError([]ulid.ULID{block3, block4}), }, "multiple store-gateway instances have some missing blocks but queried from a replica during subsequent attempts": { finderResult: bucketindex.Blocks{ @@ -1148,7 +1148,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Second attempt returns an error because there are no other store-gateways left. errors.New("no store-gateway remaining after exclude"), }, - expectedErr: fmt.Sprintf("consistency check failed because some blocks were not queried: %s", block2.String()), + expectedErr: newStoreConsistencyCheckFailedError([]ulid.ULID{block2}).Error(), }, "multiple store-gateway instances have some missing blocks (consistency check failed)": { finderResult: bucketindex.Blocks{ @@ -1190,7 +1190,7 @@ func TestBlocksStoreQuerier_Labels(t *testing.T) { // Second attempt returns an error because there are no other store-gateways left. errors.New("no store-gateway remaining after exclude"), }, - expectedErr: fmt.Sprintf("consistency check failed because some blocks were not queried: %s %s", block3.String(), block4.String()), + expectedErr: newStoreConsistencyCheckFailedError([]ulid.ULID{block3, block4}).Error(), }, "multiple store-gateway instances have some missing blocks but queried from a replica during subsequent attempts": { // Block1 has series1 @@ -2009,3 +2009,21 @@ func valuesFromSeries(name string, series ...labels.Labels) []string { sort.Strings(values) return values } + +func TestBlocksStoreQueryableErrMsgs(t *testing.T) { + tests := map[string]struct { + err error + msg string + }{ + "newStoreConsistencyCheckFailedError": { + err: newStoreConsistencyCheckFailedError([]ulid.ULID{ulid.MustNew(1, nil)}), + msg: `the consistency check failed because some blocks were not queried (err-mimir-store-consistency-check-failed). 
The non-queried blocks are: 00000000010000000000000000`, + }, + } + + for testName, tc := range tests { + t.Run(testName, func(t *testing.T) { + assert.Equal(t, tc.msg, tc.err.Error()) + }) + } +} diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index b25a6f7045..79a60f5b81 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -356,5 +356,5 @@ func (cfg *BucketIndexConfig) RegisterFlagsWithPrefix(f *flag.FlagSet, prefix st f.BoolVar(&cfg.Enabled, prefix+"enabled", true, "If enabled, queriers and store-gateways discover blocks by reading a bucket index (created and updated by the compactor) instead of periodically scanning the bucket.") f.DurationVar(&cfg.UpdateOnErrorInterval, prefix+"update-on-error-interval", time.Minute, "How frequently a bucket index, which previously failed to load, should be tried to load again. This option is used only by querier.") f.DurationVar(&cfg.IdleTimeout, prefix+"idle-timeout", time.Hour, "How long a unused bucket index should be cached. Once this timeout expires, the unused bucket index is removed from the in-memory cache. This option is used only by querier.") - f.DurationVar(&cfg.MaxStalePeriod, prefix+"max-stale-period", time.Hour, "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, while this check is enforced in the querier (at query time).") + f.DurationVar(&cfg.MaxStalePeriod, prefix+"max-stale-period", time.Hour, "The maximum allowed age of a bucket index (last updated) before queries start failing because the bucket index is too old. The bucket index is periodically updated by the compactor, and this check is enforced in the querier (at query time).") } diff --git a/pkg/util/globalerror/errors.go b/pkg/util/globalerror/errors.go index 1fa5bb1aa9..00b437e433 100644 --- a/pkg/util/globalerror/errors.go +++ b/pkg/util/globalerror/errors.go @@ -57,6 +57,9 @@ const ( SampleOutOfOrder ID = "sample-out-of-order" SampleDuplicateTimestamp ID = "sample-duplicate-timestamp" ExemplarSeriesMissing ID = "exemplar-series-missing" + + StoreConsistencyCheckFailed ID = "store-consistency-check-failed" + BucketIndexTooOld ID = "bucket-index-too-old" ) // Message returns the provided msg, appending the error id. diff --git a/pkg/util/globalerror/errors_test.go b/pkg/util/globalerror/errors_test.go index 1eda1c1533..fa99e2000a 100644 --- a/pkg/util/globalerror/errors_test.go +++ b/pkg/util/globalerror/errors_test.go @@ -17,22 +17,22 @@ func TestID_Message(t *testing.T) { func TestID_MessageWithLimitConfig(t *testing.T) { for _, tc := range []struct { - actual string expected string + actual string }{ { - actual: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limit by configuring -my-flag1, or by contacting your service administrator.", - expected: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1"), + expected: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limit by configuring -my-flag1, or by contacting your service administrator.", + actual: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1"), }, { - actual: "an error (err-mimir-missing-metric-name). 
You can adjust the related per-tenant limits by configuring -my-flag1 and -my-flag2, or by contacting your service administrator.", - expected: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1", "my-flag2"), + expected: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limits by configuring -my-flag1 and -my-flag2, or by contacting your service administrator.", + actual: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1", "my-flag2"), }, { - actual: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limits by configuring -my-flag1, -my-flag2 and -my-flag3, or by contacting your service administrator.", - expected: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1", "my-flag2", "my-flag3"), + expected: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limits by configuring -my-flag1, -my-flag2 and -my-flag3, or by contacting your service administrator.", + actual: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1", "my-flag2", "my-flag3"), }, } { - assert.Equal(t, tc.actual, tc.expected) + assert.Equal(t, tc.expected, tc.actual) } } From deed107e25099537d6cd305b36a1e6c184b543bf Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Fri, 24 Jun 2022 11:38:31 +0200 Subject: [PATCH 38/63] Remove deprecated ruler endpoints (#2182) * Remove deprecated ruler endpoints Removes /api/v1/rules* and /prometheus/rules* endpoints in preparation for the 2.2.0 release. Replaces their usages throughout the repo with /prometheus/config/v1/rules* Signed-off-by: Dimitar Dimitrov * Add changelog entry Signed-off-by: Dimitar Dimitrov * Fix failing tests Signed-off-by: Dimitar Dimitrov * Add changelog entry for mimirtool Signed-off-by: Dimitar Dimitrov --- CHANGELOG.md | 2 ++ cmd/query-tee/main.go | 2 +- cmd/query-tee/main_test.go | 4 +-- .../configuring/about-versioning.md | 3 -- .../reference-http-api/index.md | 36 ------------------- .../operators-guide/tools/query-tee.md | 2 +- integration/e2emimir/client.go | 10 +++--- integration/ruler_test.go | 13 +++---- .../helm/charts/mimir-distributed/values.yaml | 6 ---- .../templates/nginx/nginx-configmap.yaml | 6 ---- pkg/api/api.go | 18 ---------- pkg/mimirtool/client/client.go | 4 +-- pkg/mimirtool/client/client_test.go | 36 +++++++++---------- pkg/mimirtool/client/rules_test.go | 10 +++--- pkg/mimirtool/commands/rules.go | 2 +- pkg/ruler/api_test.go | 26 +++++++------- 16 files changed, 57 insertions(+), 123 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 54fc3833f2..8aa40bb720 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -17,6 +17,7 @@ * [CHANGE] Blocks uploaded by ingester no longer contain `__org_id__` label. Compactor now ignores this label and will compact blocks with and without this label together. `mimirconvert` tool will remove the label from blocks as "unknown" label. #1972 * [CHANGE] Querier: deprecated `-querier.shuffle-sharding-ingesters-lookback-period`, instead adding `-querier.shuffle-sharding-ingesters-enabled` to enable or disable shuffle sharding on the read path. The value of `-querier.query-ingesters-within` is now used internally for shuffle sharding lookback. #2110 * [CHANGE] Memberlist: `-memberlist.abort-if-join-fails` now defaults to false. Previously it defaulted to true. #2168 +* [CHANGE] Ruler: `/api/v1/rules*` and `/prometheus/rules*` configuration endpoints are removed. Use `/prometheus/config/v1/rules*`. 
#2182 * [ENHANCEMENT] Distributor: Added limit to prevent tenants from sending excessive number of requests: #1843 * The following CLI flags (and their respective YAML config options) have been added: * `-distributor.request-rate-limit` @@ -87,6 +88,7 @@ ### Mimirtool +* [CHANGE] mimirtool rules: `--use-legacy-routes` now toggles between using `/prometheus/config/v1/rules` (default) and `/api/v1/rules` (legacy) endpoints. #2182 * [FEATURE] Added bearer token support for when Mimir is behind a gateway authenticating by bearer token. #2146 * [BUGFIX] mimirtool analyze: Fix dashboard JSON unmarshalling errors (#1840). #1973 diff --git a/cmd/query-tee/main.go b/cmd/query-tee/main.go index 632703c652..e651ad58b7 100644 --- a/cmd/query-tee/main.go +++ b/cmd/query-tee/main.go @@ -86,7 +86,7 @@ func mimirReadRoutes(cfg Config) []querytee.Route { {Path: prefix + "/api/v1/label/{name}/values", RouteName: "api_v1_label_name_values", Methods: []string{"GET", "POST"}, ResponseComparator: nil}, {Path: prefix + "/api/v1/series", RouteName: "api_v1_series", Methods: []string{"GET", "POST"}, ResponseComparator: nil}, {Path: prefix + "/api/v1/metadata", RouteName: "api_v1_metadata", Methods: []string{"GET", "POST"}, ResponseComparator: nil}, - {Path: prefix + "/api/v1/rules", RouteName: "api_v1_rules", Methods: []string{"GET", "POST"}, ResponseComparator: nil}, + {Path: prefix + "/prometheus/config/v1/rules", RouteName: "prometheus_config_v1_rules", Methods: []string{"GET", "POST"}, ResponseComparator: nil}, {Path: prefix + "/api/v1/alerts", RouteName: "api_v1_alerts", Methods: []string{"GET", "POST"}, ResponseComparator: nil}, } } diff --git a/cmd/query-tee/main_test.go b/cmd/query-tee/main_test.go index 2705aab3e8..513e1772a0 100644 --- a/cmd/query-tee/main_test.go +++ b/cmd/query-tee/main_test.go @@ -15,11 +15,11 @@ import ( func TestMimirReadRoutes(t *testing.T) { routes := mimirReadRoutes(Config{PathPrefix: ""}) for _, r := range routes { - assert.True(t, strings.HasPrefix(r.Path, "/api/v1/")) + assert.True(t, strings.HasPrefix(r.Path, "/api/v1/") || strings.HasPrefix(r.Path, "/prometheus/")) } routes = mimirReadRoutes(Config{PathPrefix: "/some/random/prefix///"}) for _, r := range routes { - assert.True(t, strings.HasPrefix(r.Path, "/some/random/prefix/api/v1/")) + assert.Regexp(t, "/some/random/prefix/[a-z].*", r.Path) } } diff --git a/docs/sources/operators-guide/configuring/about-versioning.md b/docs/sources/operators-guide/configuring/about-versioning.md index d459f9ca5d..c113726457 100644 --- a/docs/sources/operators-guide/configuring/about-versioning.md +++ b/docs/sources/operators-guide/configuring/about-versioning.md @@ -96,6 +96,3 @@ The following features are currently deprecated: - Ingester: - `-blocks-storage.tsdb.isolation-enabled` CLI flag and `isolation_enabled` YAML config parameter. This will be removed in version 2.3.0. - `active_series_custom_trackers` YAML config parameter in the ingester block. The configuration has been moved to limit config, the ingester config will be removed in version 2.3.0. -- Ruler: - - `/api/v1/rules/**` configuration endpoints. These will be removed in version 2.2.0. Use their `/config/v1/rules/**` equivalents instead. - - `/rules/**` configuration endpoints. These will be removed in version 2.2.0. Use their `/config/v1/rules/**` equivalents instead. 
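The deprecation list above notes that `active_series_custom_trackers` has moved from the ingester block to the limits config. As a minimal sketch of the new placement, assuming the `prod` tracker name and matcher taken from the configuration reference (both are illustrative only):

```yaml
# Hypothetical Mimir configuration fragment: the custom trackers now belong
# under the top-level `limits` block rather than the `ingester` block.
limits:
  active_series_custom_trackers:
    prod: '{namespace=~"prod-.*"}'
```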
diff --git a/docs/sources/operators-guide/reference-http-api/index.md b/docs/sources/operators-guide/reference-http-api/index.md index f02bdc9b37..a8f6d0c319 100644 --- a/docs/sources/operators-guide/reference-http-api/index.md +++ b/docs/sources/operators-guide/reference-http-api/index.md @@ -552,12 +552,6 @@ Requires [authentication](#authentication). ``` GET /config/v1/rules - -# Deprecated; will be removed in Mimir v2.2.0 -GET /api/v1/rules - -# Deprecated; will be removed in Mimir v2.2.0 -GET /rules ``` List all rules configured for the authenticated tenant. This endpoint returns a YAML dictionary with all the rule groups for each namespace and `200` status code on success. @@ -620,12 +614,6 @@ Requires [authentication](#authentication). ``` GET /config/v1/rules/{namespace} - -# Deprecated; will be removed in Mimir v2.2.0 -GET /api/v1/rules/{namespace} - -# Deprecated; will be removed in Mimir v2.2.0 -GET /rules/{namespace} ``` Returns the rule groups defined for a given namespace. @@ -657,12 +645,6 @@ rules: ``` GET /config/v1/rules/{namespace}/{groupName} - -# Deprecated; will be removed in Mimir v2.2.0 -GET /api/v1/rules/{namespace}/{groupName} - -# Deprecated; will be removed in Mimir v2.2.0 -GET /rules/{namespace}/{groupName} ``` Returns the rule group matching the request namespace and group name. @@ -675,12 +657,6 @@ Requires [authentication](#authentication). ``` POST //config/v1/rules/{namespace} - -# Deprecated; will be removed in Mimir v2.2.0 -POST /api/v1/rules/{namespace} - -# Deprecated; will be removed in Mimir v2.2.0 -POST /rules/{namespace} ``` Creates or updates a rule group. @@ -709,12 +685,6 @@ rules: ``` DELETE //config/v1/rules/{namespace}/{groupName} - -# Deprecated; will be removed in Mimir v2.2.0 -DELETE /api/v1/rules/{namespace}/{groupName} - -# Deprecated; will be removed in Mimir v2.2.0 -DELETE /rules/{namespace}/{groupName} ``` Deletes a rule group by namespace and group name. This endpoints returns `202` on success. @@ -727,12 +697,6 @@ Requires [authentication](#authentication). ``` DELETE //config/v1/rules/{namespace} - -# Deprecated; will be removed in Mimir v2.2.0 -DELETE /api/v1/rules/{namespace} - -# Deprecated; will be removed in Mimir v2.2.0 -DELETE /rules/{namespace} ``` Deletes all the rule groups in a namespace (including the namespace itself). This endpoint returns `202` on success. diff --git a/docs/sources/operators-guide/tools/query-tee.md b/docs/sources/operators-guide/tools/query-tee.md index efadd40286..b3ca317069 100644 --- a/docs/sources/operators-guide/tools/query-tee.md +++ b/docs/sources/operators-guide/tools/query-tee.md @@ -56,7 +56,7 @@ The following Prometheus API endpoints are supported by `query-tee`: - `GET /api/v1/series` - `GET /api/v1/metadata` - `GET /api/v1/alerts` -- `GET /api/v1/rules` +- `GET /prometheus/config/v1/rules` You can configure the `` by setting the `-server.path-prefix` flag, which defaults to an empty string. diff --git a/integration/e2emimir/client.go b/integration/e2emimir/client.go index 34789f8b0f..6cc9ef664e 100644 --- a/integration/e2emimir/client.go +++ b/integration/e2emimir/client.go @@ -310,7 +310,7 @@ func (c *Client) GetPrometheusRules() ([]*ruler.RuleGroup, error) { // GetRuleGroups gets the configured rule groups from the ruler. 
func (c *Client) GetRuleGroups() (map[string][]rulefmt.RuleGroup, error) { // Create HTTP request - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/api/v1/rules", c.rulerAddress), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/prometheus/config/v1/rules", c.rulerAddress), nil) if err != nil { return nil, err } @@ -350,7 +350,7 @@ func (c *Client) SetRuleGroup(rulegroup rulefmt.RuleGroup, namespace string) err } // Create HTTP request - req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/api/v1/rules/%s", c.rulerAddress, url.PathEscape(namespace)), bytes.NewReader(data)) + req, err := http.NewRequest("POST", fmt.Sprintf("http://%s/prometheus/config/v1/rules/%s", c.rulerAddress, url.PathEscape(namespace)), bytes.NewReader(data)) if err != nil { return err } @@ -379,7 +379,7 @@ func (c *Client) SetRuleGroup(rulegroup rulefmt.RuleGroup, namespace string) err // GetRuleGroup gets a rule group. func (c *Client) GetRuleGroup(namespace string, groupName string) (*http.Response, error) { // Create HTTP request - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/api/v1/rules/%s/%s", c.rulerAddress, url.PathEscape(namespace), url.PathEscape(groupName)), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/prometheus/config/v1/rules/%s/%s", c.rulerAddress, url.PathEscape(namespace), url.PathEscape(groupName)), nil) if err != nil { return nil, err } @@ -397,7 +397,7 @@ func (c *Client) GetRuleGroup(namespace string, groupName string) (*http.Respons // DeleteRuleGroup deletes a rule group. func (c *Client) DeleteRuleGroup(namespace string, groupName string) error { // Create HTTP request - req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s/api/v1/rules/%s/%s", c.rulerAddress, url.PathEscape(namespace), url.PathEscape(groupName)), nil) + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s/prometheus/config/v1/rules/%s/%s", c.rulerAddress, url.PathEscape(namespace), url.PathEscape(groupName)), nil) if err != nil { return err } @@ -421,7 +421,7 @@ func (c *Client) DeleteRuleGroup(namespace string, groupName string) error { // DeleteRuleNamespace deletes all the rule groups (and the namespace itself). 
func (c *Client) DeleteRuleNamespace(namespace string) error { // Create HTTP request - req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s/api/v1/rules/%s", c.rulerAddress, url.PathEscape(namespace)), nil) + req, err := http.NewRequest("DELETE", fmt.Sprintf("http://%s/prometheus/config/v1/rules/%s", c.rulerAddress, url.PathEscape(namespace)), nil) if err != nil { return err } diff --git a/integration/ruler_test.go b/integration/ruler_test.go index 61a8ca2c5c..bd00ad3201 100644 --- a/integration/ruler_test.go +++ b/integration/ruler_test.go @@ -98,7 +98,7 @@ func TestRulerAPI(t *testing.T) { require.Equal(t, retrievedNamespace[0].Name, ruleGroup.Name) // Test compression by inspecting the response Headers - req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/api/v1/rules", ruler.HTTPEndpoint()), nil) + req, err := http.NewRequest("GET", fmt.Sprintf("http://%s/prometheus/config/v1/rules", ruler.HTTPEndpoint()), nil) require.NoError(t, err) req.Header.Set("X-Scope-OrgID", "user-1") @@ -988,12 +988,17 @@ func TestRulerEnableAPIs(t *testing.T) { { name: "API is enabled", apiEnabled: true, - expectedRegisteredEndpoints: [][2]string{ // not going to test GET /api/v1/rules/my_namespace/my_group because it requires creating a rule group {http.MethodGet, "/prometheus/api/v1/alerts"}, {http.MethodGet, "/prometheus/api/v1/rules"}, + {http.MethodGet, "/prometheus/config/v1/rules"}, + {http.MethodGet, "/prometheus/config/v1/rules/my_namespace"}, + {http.MethodPost, "/prometheus/config/v1/rules/my_namespace"}, + }, + + expectedMissingEndpoints: [][2]string{ {http.MethodGet, "/api/v1/rules"}, {http.MethodGet, "/api/v1/rules/my_namespace"}, {http.MethodPost, "/api/v1/rules/my_namespace"}, @@ -1001,10 +1006,6 @@ func TestRulerEnableAPIs(t *testing.T) { {http.MethodGet, "/prometheus/rules"}, {http.MethodGet, "/prometheus/rules/my_namespace"}, {http.MethodPost, "/prometheus/rules/my_namespace"}, - - {http.MethodGet, "/prometheus/config/v1/rules"}, - {http.MethodGet, "/prometheus/config/v1/rules/my_namespace"}, - {http.MethodPost, "/prometheus/config/v1/rules/my_namespace"}, }, }, } diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index 25f1e37e1a..c90a853a97 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -1587,15 +1587,9 @@ nginx: proxy_pass http://{{ template "mimir.fullname" . }}-ruler.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:{{ include "mimir.serverHttpListenPort" . }}$request_uri; } - location /api/v1/rules { - proxy_pass http://{{ template "mimir.fullname" . }}-ruler.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:{{ include "mimir.serverHttpListenPort" . }}$request_uri; - } location {{ template "mimir.prometheusHttpPrefix" . }}/api/v1/alerts { proxy_pass http://{{ template "mimir.fullname" . }}-ruler.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:{{ include "mimir.serverHttpListenPort" . }}$request_uri; } - location {{ template "mimir.prometheusHttpPrefix" . }}/rules { - proxy_pass http://{{ template "mimir.fullname" . }}-ruler.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:{{ include "mimir.serverHttpListenPort" . }}$request_uri; - } location = /ruler/ring { proxy_pass http://{{ template "mimir.fullname" . }}-ruler.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:{{ include "mimir.serverHttpListenPort" . 
}}$request_uri; } diff --git a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml index 54c5b2f4ad..fe97d83067 100644 --- a/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml +++ b/operations/helm/tests/test-oss-values-generated/mimir-distributed/templates/nginx/nginx-configmap.yaml @@ -82,15 +82,9 @@ data: proxy_pass http://test-oss-values-mimir-ruler.citestns.svc.cluster.local:8080$request_uri; } - location /api/v1/rules { - proxy_pass http://test-oss-values-mimir-ruler.citestns.svc.cluster.local:8080$request_uri; - } location /prometheus/api/v1/alerts { proxy_pass http://test-oss-values-mimir-ruler.citestns.svc.cluster.local:8080$request_uri; } - location /prometheus/rules { - proxy_pass http://test-oss-values-mimir-ruler.citestns.svc.cluster.local:8080$request_uri; - } location = /ruler/ring { proxy_pass http://test-oss-values-mimir-ruler.citestns.svc.cluster.local:8080$request_uri; } diff --git a/pkg/api/api.go b/pkg/api/api.go index 19df89013e..1c7075ec91 100644 --- a/pkg/api/api.go +++ b/pkg/api/api.go @@ -305,24 +305,6 @@ func (a *API) RegisterRulerAPI(r *ruler.API, configAPIEnabled bool) { a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/api/v1/alerts"), http.HandlerFunc(r.PrometheusAlerts), true, true, "GET") if configAPIEnabled { - // Ruler API Routes - // TODO remove the /api/v1/rules/** endpoints in Mimir 2.2.0 as agreed in https://github.com/grafana/mimir/pull/763#discussion_r808270581 - a.RegisterDeprecatedRoute("/api/v1/rules", http.HandlerFunc(r.ListRules), true, true, "GET") - a.RegisterDeprecatedRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.ListRules), true, true, "GET") - a.RegisterDeprecatedRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.GetRuleGroup), true, true, "GET") - a.RegisterDeprecatedRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.CreateRuleGroup), true, true, "POST") - a.RegisterDeprecatedRoute("/api/v1/rules/{namespace}/{groupName}", http.HandlerFunc(r.DeleteRuleGroup), true, true, "DELETE") - a.RegisterDeprecatedRoute("/api/v1/rules/{namespace}", http.HandlerFunc(r.DeleteNamespace), true, true, "DELETE") - - // Configuration endpoints with Prometheus prefix, so we keep Prometheus-compatible EPs and config EPs under the same prefix. 
- // TODO remove the /v1/rules/** endpoints in Mimir 2.2.0 as agreed in https://github.com/grafana/mimir/pull/1222#issuecomment-1046759965 - a.RegisterDeprecatedRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/rules"), http.HandlerFunc(r.ListRules), true, true, "GET") - a.RegisterDeprecatedRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/rules/{namespace}"), http.HandlerFunc(r.ListRules), true, true, "GET") - a.RegisterDeprecatedRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/rules/{namespace}/{groupName}"), http.HandlerFunc(r.GetRuleGroup), true, true, "GET") - a.RegisterDeprecatedRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/rules/{namespace}"), http.HandlerFunc(r.CreateRuleGroup), true, true, "POST") - a.RegisterDeprecatedRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/rules/{namespace}/{groupName}"), http.HandlerFunc(r.DeleteRuleGroup), true, true, "DELETE") - a.RegisterDeprecatedRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/rules/{namespace}"), http.HandlerFunc(r.DeleteNamespace), true, true, "DELETE") - // Long-term maintained configuration API routes a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/config/v1/rules"), http.HandlerFunc(r.ListRules), true, true, "GET") a.RegisterRoute(path.Join(a.cfg.PrometheusHTTPPrefix, "/config/v1/rules/{namespace}"), http.HandlerFunc(r.ListRules), true, true, "GET") diff --git a/pkg/mimirtool/client/client.go b/pkg/mimirtool/client/client.go index f886890cc6..a098eae451 100644 --- a/pkg/mimirtool/client/client.go +++ b/pkg/mimirtool/client/client.go @@ -22,8 +22,8 @@ import ( ) const ( - rulerAPIPath = "/api/v1/rules" - legacyAPIPath = "/api/prom/rules" + rulerAPIPath = "/prometheus/config/v1/rules" + legacyAPIPath = "/api/v1/rules" ) var ( diff --git a/pkg/mimirtool/client/client_test.go b/pkg/mimirtool/client/client_test.go index 7a87b8cc0f..76cc71842c 100644 --- a/pkg/mimirtool/client/client_test.go +++ b/pkg/mimirtool/client/client_test.go @@ -23,66 +23,66 @@ func TestBuildURL(t *testing.T) { }{ { name: "builds the correct URL with a trailing slash", - path: "/api/v1/rules", + path: "/prometheus/config/v1/rules", method: http.MethodPost, url: "http://mimirurl.com/", - resultURL: "http://mimirurl.com/api/v1/rules", + resultURL: "http://mimirurl.com/prometheus/config/v1/rules", }, { name: "builds the correct URL without a trailing slash", - path: "/api/v1/rules", + path: "/prometheus/config/v1/rules", method: http.MethodPost, url: "http://mimirurl.com", - resultURL: "http://mimirurl.com/api/v1/rules", + resultURL: "http://mimirurl.com/prometheus/config/v1/rules", }, { name: "builds the correct URL when the base url has a path", - path: "/api/v1/rules", + path: "/prometheus/config/v1/rules", method: http.MethodPost, url: "http://mimirurl.com/apathto", - resultURL: "http://mimirurl.com/apathto/api/v1/rules", + resultURL: "http://mimirurl.com/apathto/prometheus/config/v1/rules", }, { name: "builds the correct URL when the base url has a path with trailing slash", - path: "/api/v1/rules", + path: "/prometheus/config/v1/rules", method: http.MethodPost, url: "http://mimirurl.com/apathto/", - resultURL: "http://mimirurl.com/apathto/api/v1/rules", + resultURL: "http://mimirurl.com/apathto/prometheus/config/v1/rules", }, { name: "builds the correct URL with a trailing slash and the target path contains special characters", - path: "/api/v1/rules/%20%2Fspace%F0%9F%8D%BB", + path: "/prometheus/config/v1/rules/%20%2Fspace%F0%9F%8D%BB", method: http.MethodPost, url: "http://mimirurl.com/", - resultURL: "http://mimirurl.com/api/v1/rules/%20%2Fspace%F0%9F%8D%BB", 
+ resultURL: "http://mimirurl.com/prometheus/config/v1/rules/%20%2Fspace%F0%9F%8D%BB", }, { name: "builds the correct URL without a trailing slash and the target path contains special characters", - path: "/api/v1/rules/%20%2Fspace%F0%9F%8D%BB", + path: "/prometheus/config/v1/rules/%20%2Fspace%F0%9F%8D%BB", method: http.MethodPost, url: "http://mimirurl.com", - resultURL: "http://mimirurl.com/api/v1/rules/%20%2Fspace%F0%9F%8D%BB", + resultURL: "http://mimirurl.com/prometheus/config/v1/rules/%20%2Fspace%F0%9F%8D%BB", }, { name: "builds the correct URL when the base url has a path and the target path contains special characters", - path: "/api/v1/rules/%20%2Fspace%F0%9F%8D%BB", + path: "/prometheus/config/v1/rules/%20%2Fspace%F0%9F%8D%BB", method: http.MethodPost, url: "http://mimirurl.com/apathto", - resultURL: "http://mimirurl.com/apathto/api/v1/rules/%20%2Fspace%F0%9F%8D%BB", + resultURL: "http://mimirurl.com/apathto/prometheus/config/v1/rules/%20%2Fspace%F0%9F%8D%BB", }, { name: "builds the correct URL when the base url has a path and the target path starts with a escaped slash", - path: "/api/v1/rules/%2F-first-char-slash", + path: "/prometheus/config/v1/rules/%2F-first-char-slash", method: http.MethodPost, url: "http://mimirurl.com/apathto", - resultURL: "http://mimirurl.com/apathto/api/v1/rules/%2F-first-char-slash", + resultURL: "http://mimirurl.com/apathto/prometheus/config/v1/rules/%2F-first-char-slash", }, { name: "builds the correct URL when the base url has a path and the target path ends with a escaped slash", - path: "/api/v1/rules/last-char-slash%2F", + path: "/prometheus/config/v1/rules/last-char-slash%2F", method: http.MethodPost, url: "http://mimirurl.com/apathto", - resultURL: "http://mimirurl.com/apathto/api/v1/rules/last-char-slash%2F", + resultURL: "http://mimirurl.com/apathto/prometheus/config/v1/rules/last-char-slash%2F", }, } diff --git a/pkg/mimirtool/client/rules_test.go b/pkg/mimirtool/client/rules_test.go index 023429fd00..4da1c08201 100644 --- a/pkg/mimirtool/client/rules_test.go +++ b/pkg/mimirtool/client/rules_test.go @@ -41,31 +41,31 @@ func TestMimirClient_X(t *testing.T) { test: "regular-characters", namespace: "my-namespace", name: "my-name", - expURLPath: "/api/v1/rules/my-namespace/my-name", + expURLPath: "/prometheus/config/v1/rules/my-namespace/my-name", }, { test: "special-characters-spaces", namespace: "My: Namespace", name: "My: Name", - expURLPath: "/api/v1/rules/My:%20Namespace/My:%20Name", + expURLPath: "/prometheus/config/v1/rules/My:%20Namespace/My:%20Name", }, { test: "special-characters-slashes", namespace: "My/Namespace", name: "My/Name", - expURLPath: "/api/v1/rules/My%2FNamespace/My%2FName", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/My%2FName", }, { test: "special-characters-slash-first", namespace: "My/Namespace", name: "/first-char-slash", - expURLPath: "/api/v1/rules/My%2FNamespace/%2Ffirst-char-slash", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/%2Ffirst-char-slash", }, { test: "special-characters-slash-first", namespace: "My/Namespace", name: "last-char-slash/", - expURLPath: "/api/v1/rules/My%2FNamespace/last-char-slash%2F", + expURLPath: "/prometheus/config/v1/rules/My%2FNamespace/last-char-slash%2F", }, } { t.Run(tc.test, func(t *testing.T) { diff --git a/pkg/mimirtool/commands/rules.go b/pkg/mimirtool/commands/rules.go index e51516ae5d..858ae34dcc 100644 --- a/pkg/mimirtool/commands/rules.go +++ b/pkg/mimirtool/commands/rules.go @@ -143,7 +143,7 @@ func (r *RuleCommand) Register(app 
*kingpin.Application, envVars EnvVarNames) { Required(). StringVar(&r.ClientConfig.ID) - c.Flag("use-legacy-routes", "If set, the API requests to Grafana Mimir use the legacy /api/prom/ routes; alternatively, set "+envVars.UseLegacyRoutes+"."). + c.Flag("use-legacy-routes", "If set, the API requests to Grafana Mimir use the legacy /api/v1/rules routes instead of /prometheus/config/v1/rules; alternatively, set "+envVars.UseLegacyRoutes+"."). Default("false"). Envar(envVars.UseLegacyRoutes). BoolVar(&r.ClientConfig.UseLegacyRoutes) diff --git a/pkg/ruler/api_test.go b/pkg/ruler/api_test.go index 1b09990de7..ea2051462b 100644 --- a/pkg/ruler/api_test.go +++ b/pkg/ruler/api_test.go @@ -301,10 +301,10 @@ rules: for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { router := mux.NewRouter() - router.Path("/api/v1/rules/{namespace}").Methods("POST").HandlerFunc(a.CreateRuleGroup) - router.Path("/api/v1/rules/{namespace}/{groupName}").Methods("GET").HandlerFunc(a.GetRuleGroup) + router.Path("/prometheus/config/v1/rules/{namespace}").Methods("POST").HandlerFunc(a.CreateRuleGroup) + router.Path("/prometheus/config/v1/rules/{namespace}/{groupName}").Methods("GET").HandlerFunc(a.GetRuleGroup) // POST - req := requestFor(t, http.MethodPost, "https://localhost:8080/api/v1/rules/namespace", strings.NewReader(tt.input), "user1") + req := requestFor(t, http.MethodPost, "https://localhost:8080/prometheus/config/v1/rules/namespace", strings.NewReader(tt.input), "user1") w := httptest.NewRecorder() router.ServeHTTP(w, req) @@ -312,7 +312,7 @@ rules: if tt.err == nil { // GET - req = requestFor(t, http.MethodGet, "https://localhost:8080/api/v1/rules/namespace/test", nil, "user1") + req = requestFor(t, http.MethodGet, "https://localhost:8080/prometheus/config/v1/rules/namespace/test", nil, "user1") w = httptest.NewRecorder() router.ServeHTTP(w, req) @@ -373,11 +373,11 @@ func TestRuler_DeleteNamespace(t *testing.T) { a := NewAPI(r, r.store, log.NewNopLogger()) router := mux.NewRouter() - router.Path("/api/v1/rules/{namespace}").Methods(http.MethodDelete).HandlerFunc(a.DeleteNamespace) - router.Path("/api/v1/rules/{namespace}/{groupName}").Methods(http.MethodGet).HandlerFunc(a.GetRuleGroup) + router.Path("/prometheus/config/v1/rules/{namespace}").Methods(http.MethodDelete).HandlerFunc(a.DeleteNamespace) + router.Path("/prometheus/config/v1/rules/{namespace}/{groupName}").Methods(http.MethodGet).HandlerFunc(a.GetRuleGroup) // Verify namespace1 rules are there. 
- req := requestFor(t, http.MethodGet, "https://localhost:8080/api/v1/rules/namespace1/group1", nil, "user1") + req := requestFor(t, http.MethodGet, "https://localhost:8080/prometheus/config/v1/rules/namespace1/group1", nil, "user1") w := httptest.NewRecorder() router.ServeHTTP(w, req) @@ -385,7 +385,7 @@ func TestRuler_DeleteNamespace(t *testing.T) { require.Equal(t, "name: group1\ninterval: 1m\nrules:\n - record: UP_RULE\n expr: up\n - alert: UP_ALERT\n expr: up < 1\n", w.Body.String()) // Delete namespace1 - req = requestFor(t, http.MethodDelete, "https://localhost:8080/api/v1/rules/namespace1", nil, "user1") + req = requestFor(t, http.MethodDelete, "https://localhost:8080/prometheus/config/v1/rules/namespace1", nil, "user1") w = httptest.NewRecorder() router.ServeHTTP(w, req) @@ -393,7 +393,7 @@ func TestRuler_DeleteNamespace(t *testing.T) { require.Equal(t, "{\"status\":\"success\",\"data\":null,\"errorType\":\"\",\"error\":\"\"}", w.Body.String()) // On Partial failures - req = requestFor(t, http.MethodDelete, "https://localhost:8080/api/v1/rules/namespace2", nil, "user1") + req = requestFor(t, http.MethodDelete, "https://localhost:8080/prometheus/config/v1/rules/namespace2", nil, "user1") w = httptest.NewRecorder() router.ServeHTTP(w, req) @@ -442,9 +442,9 @@ rules: for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { router := mux.NewRouter() - router.Path("/api/v1/rules/{namespace}").Methods("POST").HandlerFunc(a.CreateRuleGroup) + router.Path("/prometheus/config/v1/rules/{namespace}").Methods("POST").HandlerFunc(a.CreateRuleGroup) // POST - req := requestFor(t, http.MethodPost, "https://localhost:8080/api/v1/rules/namespace", strings.NewReader(tt.input), "user1") + req := requestFor(t, http.MethodPost, "https://localhost:8080/prometheus/config/v1/rules/namespace", strings.NewReader(tt.input), "user1") w := httptest.NewRecorder() router.ServeHTTP(w, req) @@ -499,12 +499,12 @@ rules: // define once so the requests build on each other so the number of rules can be tested router := mux.NewRouter() - router.Path("/api/v1/rules/{namespace}").Methods("POST").HandlerFunc(a.CreateRuleGroup) + router.Path("/prometheus/config/v1/rules/{namespace}").Methods("POST").HandlerFunc(a.CreateRuleGroup) for _, tt := range tc { t.Run(tt.name, func(t *testing.T) { // POST - req := requestFor(t, http.MethodPost, "https://localhost:8080/api/v1/rules/namespace", strings.NewReader(tt.input), "user1") + req := requestFor(t, http.MethodPost, "https://localhost:8080/prometheus/config/v1/rules/namespace", strings.NewReader(tt.input), "user1") w := httptest.NewRecorder() router.ServeHTTP(w, req) From 92e72a5bee7105135645089e88cc07617ff75296 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 24 Jun 2022 11:56:24 +0200 Subject: [PATCH 39/63] Ruler: Remove -ruler.query-frontend.timeout and use -querier.timeout instead. (#2222) --- CHANGELOG.md | 2 +- cmd/mimir/config-descriptor.json | 12 +----------- cmd/mimir/help-all.txt.tmpl | 4 +--- cmd/mimir/help.txt.tmpl | 4 +--- .../reference-configuration-parameters/index.md | 7 ++----- pkg/mimir/modules.go | 2 +- pkg/querier/engine/config.go | 2 +- pkg/ruler/remotequerier.go | 5 ----- 8 files changed, 8 insertions(+), 30 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 8aa40bb720..e9bf43f9f8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -42,7 +42,7 @@ * [BUGFIX] Fix panic at startup when Mimir is running in monolithic mode and query sharding is enabled. 
#2036 * [BUGFIX] Ruler: report `cortex_ruler_queries_failed_total` metric for any remote query error except 4xx when remote operational mode is enabled. #2053 #2143 * [BUGFIX] Ingester: fix slow rollout when using `-ingester.ring.unregister-on-shutdown=false` with long `-ingester.ring.heartbeat-period`. #2085 -* [BUGFIX] Ruler: add timeout for remote rule evaluation queries to prevent rule group evaluations getting stuck indefinitely. The duration is configurable with (`-ruler.query-frontend.timeout` (default `2m`). #2090 +* [BUGFIX] Ruler: add timeout for remote rule evaluation queries to prevent rule group evaluations getting stuck indefinitely. The duration is configurable with `-querier.timeout` (default `2m`). #2090 #2222 * [BUGFIX] Limits: Active series custom tracker configuration has been named back from `active_series_custom_trackers_config` to `active_series_custom_trackers`. For backwards compatibility both version is going to be supported for until Mimir v2.4. When both fields are specified, `active_series_custom_trackers_config` takes precedence over `active_series_custom_trackers`. #2101 * [BUGFIX] Ingester: fixed the order of labels applied when incrementing the `cortex_discarded_metadata_total` metric. #2096 * [BUGFIX] Ingester: fixed bug where retrieving metadata for a metric with multiple metadata entries would return multiple copies of a single metadata entry rather than all available entries. #2096 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index 080e1f8a75..ce2a130a16 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -1514,7 +1514,7 @@ "kind": "field", "name": "timeout", "required": false, - "desc": "The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled.", + "desc": "The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. This also applies to queries evaluated by the ruler (internally or remotely).", "fieldValue": null, "fieldDefaultValue": 120000000000, "fieldFlag": "querier.timeout", @@ -7392,16 +7392,6 @@ "fieldFlag": "ruler.query-frontend.address", "fieldType": "string" }, - { - "kind": "field", - "name": "timeout", - "required": false, - "desc": "The timeout for a rule query being evaluated by the query-frontend.", - "fieldValue": null, - "fieldDefaultValue": 120000000000, - "fieldFlag": "ruler.query-frontend.timeout", - "fieldType": "duration" - }, { "kind": "block", "name": "grpc_client_config", diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 6066f4a2dd..0333bb7205 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -1112,7 +1112,7 @@ Usage of ./cmd/mimir/mimir: -querier.store-gateway-client.tls-server-name string Override the expected name on the server certificate. -querier.timeout duration - The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. (default 2m0s) + The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. This also applies to queries evaluated by the ruler (internally or remotely). (default 2m0s) -query-frontend.align-querier-with-step Mutate incoming queries to align their start and end with their step. -query-frontend.cache-results @@ -1448,8 +1448,6 @@ Usage of ./cmd/mimir/mimir: Path to the key file for the client certificate. Also requires the client certificate to be configured. 
-ruler.query-frontend.grpc-client-config.tls-server-name string Override the expected name on the server certificate. - -ruler.query-frontend.timeout duration - The timeout for a rule query being evaluated by the query-frontend. (default 2m0s) -ruler.query-stats-enabled Report the wall time for ruler queries to complete as a per-tenant metric and as an info level log message. -ruler.resend-delay duration diff --git a/cmd/mimir/help.txt.tmpl b/cmd/mimir/help.txt.tmpl index f7e9f0e626..b7e9d9b8a3 100644 --- a/cmd/mimir/help.txt.tmpl +++ b/cmd/mimir/help.txt.tmpl @@ -340,7 +340,7 @@ Usage of ./cmd/mimir/mimir: -querier.scheduler-address string Address of the query-scheduler component, in host:port format. Only one of -querier.frontend-address or -querier.scheduler-address can be set. If neither is set, queries are only received via HTTP endpoint. -querier.timeout duration - The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. (default 2m0s) + The timeout for a query. This config option should be set on query-frontend too when query sharding is enabled. This also applies to queries evaluated by the ruler (internally or remotely). (default 2m0s) -query-frontend.align-querier-with-step Mutate incoming queries to align their start and end with their step. -query-frontend.cache-results @@ -452,8 +452,6 @@ Usage of ./cmd/mimir/mimir: Maximum number of rules per rule group per-tenant. 0 to disable. (default 20) -ruler.query-frontend.address string GRPC listen address of the query-frontend(s). Must be a DNS address (prefixed with dns:///) to enable client side load balancing. - -ruler.query-frontend.timeout duration - The timeout for a rule query being evaluated by the query-frontend. (default 2m0s) -ruler.ring.consul.hostname string Hostname and port of Consul. (default "localhost:8500") -ruler.ring.etcd.endpoints value diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index 920837a2c0..dc3b0a43dd 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -850,7 +850,8 @@ store_gateway_client: [max_concurrent: | default = 20] # The timeout for a query. This config option should be set on query-frontend -# too when query sharding is enabled. +# too when query sharding is enabled. This also applies to queries evaluated by +# the ruler (internally or remotely). # CLI flag: -querier.timeout [timeout: | default = 2m] @@ -1399,10 +1400,6 @@ query_frontend: # CLI flag: -ruler.query-frontend.address [address: | default = ""] - # The timeout for a rule query being evaluated by the query-frontend. - # CLI flag: -ruler.query-frontend.timeout - [timeout: | default = 2m] - grpc_client_config: # (advanced) gRPC client max receive message size (bytes). 
# CLI flag: -ruler.query-frontend.grpc-client-config.grpc-max-recv-msg-size diff --git a/pkg/mimir/modules.go b/pkg/mimir/modules.go index a63adb66bf..a3e4fded82 100644 --- a/pkg/mimir/modules.go +++ b/pkg/mimir/modules.go @@ -583,7 +583,7 @@ func (t *Mimir) initRuler() (serv services.Service, err error) { if err != nil { return nil, err } - remoteQuerier := ruler.NewRemoteQuerier(queryFrontendClient, t.Cfg.Ruler.QueryFrontend.Timeout, t.Cfg.API.PrometheusHTTPPrefix, util_log.Logger, ruler.WithOrgIDMiddleware) + remoteQuerier := ruler.NewRemoteQuerier(queryFrontendClient, t.Cfg.Querier.EngineConfig.Timeout, t.Cfg.API.PrometheusHTTPPrefix, util_log.Logger, ruler.WithOrgIDMiddleware) embeddedQueryable = prom_remote.NewSampleAndChunkQueryableClient( remoteQuerier, diff --git a/pkg/querier/engine/config.go b/pkg/querier/engine/config.go index 691bf2597c..d0d18a8942 100644 --- a/pkg/querier/engine/config.go +++ b/pkg/querier/engine/config.go @@ -40,7 +40,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) { } f.IntVar(&cfg.MaxConcurrent, "querier.max-concurrent", 20, sharedWithQueryFrontend("The maximum number of concurrent queries.")) - f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, sharedWithQueryFrontend("The timeout for a query.")) + f.DurationVar(&cfg.Timeout, "querier.timeout", 2*time.Minute, sharedWithQueryFrontend("The timeout for a query.")+" This also applies to queries evaluated by the ruler (internally or remotely).") f.IntVar(&cfg.MaxSamples, "querier.max-samples", 50e6, sharedWithQueryFrontend("Maximum number of samples a single query can load into memory.")) f.DurationVar(&cfg.DefaultEvaluationInterval, "querier.default-evaluation-interval", time.Minute, sharedWithQueryFrontend("The default evaluation interval or step size for subqueries.")) f.DurationVar(&cfg.LookbackDelta, "querier.lookback-delta", 5*time.Minute, sharedWithQueryFrontend("Time since the last sample after which a time series is considered stale and ignored by expression evaluations.")) diff --git a/pkg/ruler/remotequerier.go b/pkg/ruler/remotequerier.go index f8b3a475a9..628e5b6a9f 100644 --- a/pkg/ruler/remotequerier.go +++ b/pkg/ruler/remotequerier.go @@ -54,9 +54,6 @@ type QueryFrontendConfig struct { // Address is the address of the query-frontend to connect to. Address string `yaml:"address"` - // Timeout is the length of time we wait on the query-frontend before giving up. - Timeout time.Duration `yaml:"timeout"` - // GRPCClientConfig contains gRPC specific config options. GRPCClientConfig grpcclient.Config `yaml:"grpc_client_config"` } @@ -68,8 +65,6 @@ func (c *QueryFrontendConfig) RegisterFlags(f *flag.FlagSet) { "GRPC listen address of the query-frontend(s). 
Must be a DNS address (prefixed with dns:///) "+ "to enable client side load balancing.") - f.DurationVar(&c.Timeout, "ruler.query-frontend.timeout", 2*time.Minute, "The timeout for a rule query being evaluated by the query-frontend.") - c.GRPCClientConfig.RegisterFlagsWithPrefix("ruler.query-frontend.grpc-client-config", f) } From 2ce97b7448f51eadc00101bda20c0b75f4d56b82 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 24 Jun 2022 12:37:14 +0200 Subject: [PATCH 40/63] Docs: Document alertmanager shuffle sharding (#2220) Fixes #1958 Co-authored-by: Ursula Kallio --- .../configuring/configuring-shuffle-sharding/index.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md b/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md index d980118d9c..09e1cb91cb 100644 --- a/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md +++ b/docs/sources/operators-guide/configuring/configuring-shuffle-sharding/index.md @@ -63,6 +63,7 @@ Grafana Mimir supports shuffle sharding in the following components: - [Store-gateway](#store-gateway-shuffle-sharding) - [Ruler](#ruler-shuffle-sharding) - [Compactor](#compactor-shuffle-sharding) +- [Alertmanager](#alertmanager-shuffle-sharding) When you run Grafana Mimir with the default configuration, shuffle sharding is disabled and you need to explicitly enable it by increasing the shard size either globally or for a given tenant. @@ -194,6 +195,12 @@ When you enable compactor shuffle sharding by setting `-compactor.compactor-tena You can override the compactor shard size on a per-tenant basis setting by `compactor_tenant_shard_size` in the overrides section of the runtime configuration. +### Alertmanager shuffle sharding + +Alertmanager only performs distribution across replicas per tenant. The state and workload is not divided any further. The replication factor setting `-alertmanager.sharding-ring.replication-factor` determines how many replicas are used for a tenant. + +As a result, shuffle sharding is effectively always enabled for Alertmanager. + ### Shuffle sharding impact to the KV store Shuffle sharding does not add additional overhead to the KV store. 
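As mentioned above, the compactor shard size can be overridden per tenant through `compactor_tenant_shard_size` in the overrides section of the runtime configuration. A minimal sketch of such an override, assuming a hypothetical tenant ID `tenant-a` and an illustrative shard size of 2:

```yaml
# Hypothetical runtime configuration file: per-tenant override of the
# compactor shuffle-sharding shard size.
overrides:
  tenant-a:
    compactor_tenant_shard_size: 2
```

Because this lives in the runtime configuration, the override can be applied without restarting the compactor.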
From adba8ec76f64d548a884a01809b20db94cee6bb4 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar <15064823+codesome@users.noreply.github.com> Date: Fri, 24 Jun 2022 17:19:57 +0530 Subject: [PATCH 41/63] Add out-of-order sample support (#2187) * Add out-of-order sample support Signed-off-by: Ganesh Vernekar Co-authored-by: Jesus Vazquez * Fix review comments Signed-off-by: Ganesh Vernekar * Fix tests Signed-off-by: Ganesh Vernekar * Update test to check runtime change of OutOfOrderTimeWindow Signed-off-by: Ganesh Vernekar * Fix race in the test Signed-off-by: Ganesh Vernekar * Fix Peter's comments Signed-off-by: Ganesh Vernekar * Fix CI Signed-off-by: Ganesh Vernekar * Fix review comments Signed-off-by: Ganesh Vernekar Co-authored-by: Jesus Vazquez --- CHANGELOG.md | 7 +- cmd/mimir/config-descriptor.json | 50 +++++-- cmd/mimir/help-all.txt.tmpl | 12 +- .../index.md | 33 ++++- .../operators-guide/mimir-runbooks/_index.md | 5 + pkg/ingester/ingester.go | 71 ++++++--- pkg/ingester/ingester_test.go | 138 +++++++++++++++++- pkg/ingester/metrics.go | 15 +- pkg/ingester/metrics_test.go | 32 +++- pkg/storage/tsdb/config.go | 8 +- pkg/util/globalerror/errors.go | 1 + pkg/util/validation/limits.go | 8 + 12 files changed, 318 insertions(+), 62 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index e9bf43f9f8..4e7355b12d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -18,6 +18,11 @@ * [CHANGE] Querier: deprecated `-querier.shuffle-sharding-ingesters-lookback-period`, instead adding `-querier.shuffle-sharding-ingesters-enabled` to enable or disable shuffle sharding on the read path. The value of `-querier.query-ingesters-within` is now used internally for shuffle sharding lookback. #2110 * [CHANGE] Memberlist: `-memberlist.abort-if-join-fails` now defaults to false. Previously it defaulted to true. #2168 * [CHANGE] Ruler: `/api/v1/rules*` and `/prometheus/rules*` configuration endpoints are removed. Use `/prometheus/config/v1/rules*`. #2182 +* [CHANGE] Ingester: `-ingester.exemplars-update-period` has been renamed to `-ingester.tsdb-config-update-period`. You can use it to update multiple, per-tenant TSDB configurations. #2187 +* [FEATURE] Ingester: (Experimental) Add the ability to ingest out-of-order samples up to an allowed limit. If you enable this feature, it requires additional memory and disk space. This feature also enables a write-behind log, which might lead to longer ingester-start replays. When this feature is disabled, there is no overhead on memory, disk space, or startup times. #2187 + * `-ingester.out-of-order-time-window`, as duration string, allows you to set how back in time a sample can be. The default is `0s`, where `s` is seconds. + * `cortex_ingester_tsdb_out_of_order_samples_appended_total` metric tracks the total number of out-of-order samples ingested by the ingester. + * `cortex_discarded_samples_total` has a new label `reason="sample-too-old"`, when the `-ingester.out-of-order-time-window` flag is greater than zero. The label tracks the number of samples that were discarded for being too old; they were out of order, but beyond the time window allowed. * [ENHANCEMENT] Distributor: Added limit to prevent tenants from sending excessive number of requests: #1843 * The following CLI flags (and their respective YAML config options) have been added: * `-distributor.request-rate-limit` @@ -31,10 +36,10 @@ * [ENHANCEMENT] Upgrade Docker base images to `alpine:3.16.0`. 
#2028 * [ENHANCEMENT] Store-gateway: Add experimental configuration option for the store-gateway to attempt to pre-populate the file system cache when memory-mapping index-header files. Enabled with `-blocks-storage.bucket-store.index-header.map-populate-enabled=true`. Note this flag only has an effect when running on Linux. #2019 #2054 * [ENHANCEMENT] Chunk Mapper: reduce memory usage of async chunk mapper. #2043 -* [ENHANCEMENT] Ingesters: Added new configuration option that makes it possible for mimir ingesters to perform queries on overlapping blocks in the filesystem. Enabled with `-blocks-storage.tsdb.allow-overlapping-queries`. #2091 * [ENHANCEMENT] Ingester: reduce sleep time when reading WAL. #2098 * [ENHANCEMENT] Compactor: Run sanity check on blocks storage configuration at startup. #2143 * [ENHANCEMENT] Compactor: Add HTTP API for uploading TSDB blocks. Enabled with `-compactor.block-upload-enabled`. #1694 #2126 +* [ENHANCEMENT] Ingester: Enable querying overlapping blocks by default. #2187 * [BUGFIX] Fix regexp parsing panic for regexp label matchers with start/end quantifiers. #1883 * [BUGFIX] Ingester: fixed deceiving error log "failed to update cached shipped blocks after shipper initialisation", occurring for each new tenant in the ingester. #1893 * [BUGFIX] Ring: fix bug where instances may appear unhealthy in the hash ring web UI even though they are not. #1933 diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index ce2a130a16..e9edf6f8d5 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -2306,12 +2306,12 @@ }, { "kind": "field", - "name": "exemplars_update_period", + "name": "tsdb_config_update_period", "required": false, - "desc": "Period with which to update per-tenant max exemplar limit.", + "desc": "Period with which to update the per-tenant TSDB configuration.", "fieldValue": null, "fieldDefaultValue": 15000000000, - "fieldFlag": "ingester.exemplars-update-period", + "fieldFlag": "ingester.tsdb-config-update-period", "fieldType": "duration", "fieldCategory": "experimental" }, @@ -2648,6 +2648,17 @@ "fieldType": "map of tracker name (string) to matcher (string)", "fieldCategory": "advanced" }, + { + "kind": "field", + "name": "out_of_order_time_window", + "required": false, + "desc": "Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of _rate of out-of-order samples being ingested_ and _the number of series that are getting out-of-order samples_. You can configure it per tenant.", + "fieldValue": null, + "fieldDefaultValue": 0, + "fieldFlag": "ingester.out-of-order-time-window", + "fieldType": "duration", + "fieldCategory": "experimental" + }, { "kind": "field", "name": "max_fetched_chunks_per_query", @@ -5380,17 +5391,6 @@ "fieldType": "boolean", "fieldCategory": "advanced" }, - { - "kind": "field", - "name": "allow_overlapping_queries", - "required": false, - "desc": "Enable querying overlapping blocks. 
If there are going to be overlapping blocks in the ingesters this should be enabled.", - "fieldValue": null, - "fieldDefaultValue": false, - "fieldFlag": "blocks-storage.tsdb.allow-overlapping-queries", - "fieldType": "boolean", - "fieldCategory": "experimental" - }, { "kind": "field", "name": "series_hash_cache_max_size_bytes", @@ -5412,6 +5412,28 @@ "fieldFlag": "blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup", "fieldType": "int", "fieldCategory": "advanced" + }, + { + "kind": "field", + "name": "out_of_order_cap_min", + "required": false, + "desc": "Minimum capacity for out-of-order chunks, in samples between 0 and 255.", + "fieldValue": null, + "fieldDefaultValue": 4, + "fieldFlag": "blocks-storage.tsdb.out-of-order-cap-min", + "fieldType": "int", + "fieldCategory": "experimental" + }, + { + "kind": "field", + "name": "out_of_order_cap_max", + "required": false, + "desc": "Maximum capacity for out of order chunks, in samples between 1 and 255.", + "fieldValue": null, + "fieldDefaultValue": 32, + "fieldFlag": "blocks-storage.tsdb.out-of-order-cap-max", + "fieldType": "int", + "fieldCategory": "experimental" } ], "fieldValue": null, diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 0333bb7205..0d46d8d8be 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -477,8 +477,6 @@ Usage of ./cmd/mimir/mimir: OpenStack Swift user ID. -blocks-storage.swift.username string OpenStack Swift username. - -blocks-storage.tsdb.allow-overlapping-queries - [experimental] Enable querying overlapping blocks. If there are going to be overlapping blocks in the ingesters this should be enabled. -blocks-storage.tsdb.block-ranges-period value TSDB blocks range period. (default 2h0m0s) -blocks-storage.tsdb.close-idle-tsdb-timeout duration @@ -507,6 +505,10 @@ Usage of ./cmd/mimir/mimir: [experimental] True to enable snapshotting of in-memory TSDB data on disk when shutting down. -blocks-storage.tsdb.new-chunk-disk-mapper [experimental] Temporary flag to select whether to use the new (used in upstream Prometheus) or the old (legacy) chunk disk mapper. + -blocks-storage.tsdb.out-of-order-cap-max int + [experimental] Maximum capacity for out of order chunks, in samples between 1 and 255. (default 32) + -blocks-storage.tsdb.out-of-order-cap-min int + [experimental] Minimum capacity for out-of-order chunks, in samples between 0 and 255. (default 4) -blocks-storage.tsdb.retention-period duration TSDB blocks retention in the ingester before a block is removed, relative to the newest block written for the tenant. This should be larger than the -blocks-storage.tsdb.block-ranges-period, -querier.query-store-after and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks. (default 24h0m0s) -blocks-storage.tsdb.series-hash-cache-max-size-bytes uint @@ -841,8 +843,6 @@ Usage of ./cmd/mimir/mimir: Path to the key file for the client certificate. Also requires the client certificate to be configured. -ingester.client.tls-server-name string Override the expected name on the server certificate. - -ingester.exemplars-update-period duration - [experimental] Period with which to update per-tenant max exemplar limit. (default 15s) -ingester.ignore-series-limit-for-metric-names string Comma-separated list of metric names, for which the -ingester.max-global-series-per-metric limit will be ignored. Does not affect the -ingester.max-global-series-per-user limit. 
-ingester.instance-limits.max-inflight-push-requests int @@ -865,6 +865,8 @@ Usage of ./cmd/mimir/mimir: The maximum number of active series per tenant, across the cluster before replication. 0 to disable. (default 150000) -ingester.metadata-retain-period duration Period at which metadata we have not seen will remain in memory before being deleted. (default 10m0s) + -ingester.out-of-order-time-window value + [experimental] Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of _rate of out-of-order samples being ingested_ and _the number of series that are getting out-of-order samples_. You can configure it per tenant. -ingester.rate-update-period duration Period with which to update the per-tenant ingestion rates. (default 15s) -ingester.ring.consul.acl-token string @@ -949,6 +951,8 @@ Usage of ./cmd/mimir/mimir: True to enable the zone-awareness and replicate ingested samples across different availability zones. This option needs be set on ingesters, distributors, queriers and rulers when running in microservices mode. -ingester.stream-chunks-when-using-blocks Stream chunks from ingesters to queriers. (default true) + -ingester.tsdb-config-update-period duration + [experimental] Period with which to update the per-tenant TSDB configuration. (default 15s) -log.format value Output log messages in the given format. Valid formats: [logfmt, json] (default logfmt) -log.level value diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index dc3b0a43dd..43fe972a73 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -741,9 +741,9 @@ ring: # prod: '{namespace=~"prod-.*"}' [active_series_custom_trackers: | default = ] -# (experimental) Period with which to update per-tenant max exemplar limit. -# CLI flag: -ingester.exemplars-update-period -[exemplars_update_period: | default = 15s] +# (experimental) Period with which to update the per-tenant TSDB configuration. +# CLI flag: -ingester.tsdb-config-update-period +[tsdb_config_update_period: | default = 15s] instance_limits: # (advanced) Max ingestion rate (samples/sec) that ingester will accept. This @@ -2717,6 +2717,18 @@ The `limits` block configures default and per-tenant limits imposed by component # CLI flag: -ingester.active-series-custom-trackers [active_series_custom_trackers: | default = ] +# (experimental) Non-zero value enables out-of-order support for most recent +# samples that are within the time window in relation to the following two +# conditions: (1) The newest sample for that time series, if it exists. For +# example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's +# maximum time, if the series does not exist. For example, within +# [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a +# factor of _rate of out-of-order samples being ingested_ and _the number of +# series that are getting out-of-order samples_. You can configure it per +# tenant. 
+# CLI flag: -ingester.out-of-order-time-window +[out_of_order_time_window: | default = 0s] + # Maximum number of chunks that can be fetched in a single query from ingesters # and long-term storage. This limit is enforced in the querier, ruler and # store-gateway. 0 to disable. @@ -3515,11 +3527,6 @@ tsdb: # CLI flag: -blocks-storage.tsdb.isolation-enabled [isolation_enabled: | default = false] - # (experimental) Enable querying overlapping blocks. If there are going to be - # overlapping blocks in the ingesters this should be enabled. - # CLI flag: -blocks-storage.tsdb.allow-overlapping-queries - [allow_overlapping_queries: | default = false] - # (advanced) Max size - in bytes - of the in-memory series hash cache. The # cache is shared across all tenants and it's used only when query sharding is # enabled. @@ -3529,6 +3536,16 @@ tsdb: # (advanced) limit the number of concurrently opening TSDB's on startup # CLI flag: -blocks-storage.tsdb.max-tsdb-opening-concurrency-on-startup [max_tsdb_opening_concurrency_on_startup: | default = 10] + + # (experimental) Minimum capacity for out-of-order chunks, in samples between + # 0 and 255. + # CLI flag: -blocks-storage.tsdb.out-of-order-cap-min + [out_of_order_cap_min: | default = 4] + + # (experimental) Maximum capacity for out of order chunks, in samples between + # 1 and 255. + # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max + [out_of_order_cap_max: | default = 32] ``` ### compactor diff --git a/docs/sources/operators-guide/mimir-runbooks/_index.md b/docs/sources/operators-guide/mimir-runbooks/_index.md index dbd5e23a69..cac755bd0a 100644 --- a/docs/sources/operators-guide/mimir-runbooks/_index.md +++ b/docs/sources/operators-guide/mimir-runbooks/_index.md @@ -1412,6 +1412,11 @@ Common **causes**: > **Note**: You can learn more about out of order samples in Prometheus, in the blog post [Debugging out of order samples](https://www.robustperception.io/debugging-out-of-order-samples/). +### err-mimir-sample-too-old + +This error is similar to `err-mimir-sample-out-of-order`. The main difference is that the out-of-order support is enabled, but the sample is +older than the out-of-order time window as it relates to the latest sample for that particular time series or the TSDB. + ### err-mimir-sample-duplicate-timestamp This error occurs when the ingester rejects a sample because it is a duplicate of a previously received sample with the same timestamp but different value in the same time series. diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 6057439dc9..703ba06fc4 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -85,6 +85,7 @@ const ( instanceIngestionRateTickInterval = time.Second sampleOutOfOrder = "sample-out-of-order" + sampleTooOld = "sample-too-old" newValueForTimestamp = "new-value-for-timestamp" sampleOutOfBounds = "sample-out-of-bounds" ) @@ -122,7 +123,7 @@ type Config struct { ActiveSeriesMetricsIdleTimeout time.Duration `yaml:"active_series_metrics_idle_timeout" category:"advanced"` ActiveSeriesCustomTrackers activeseries.CustomTrackersConfig `yaml:"active_series_custom_trackers" doc:"description=[Deprecated] This config has been moved to the limits config, please set it there. Additional custom trackers for active metrics. If there are active series matching a provided matcher (map value), the count will be exposed in the custom trackers metric labeled using the tracker name (map key). Zero valued counts are not exposed (and removed when they go back to zero)." 
category:"advanced"` - ExemplarsUpdatePeriod time.Duration `yaml:"exemplars_update_period" category:"experimental"` + TSDBConfigUpdatePeriod time.Duration `yaml:"tsdb_config_update_period" category:"experimental"` BlocksStorageConfig mimir_tsdb.BlocksStorageConfig `yaml:"-"` StreamChunksWhenUsingBlocks bool `yaml:"-" category:"advanced"` @@ -150,7 +151,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet, logger log.Logger) { f.DurationVar(&cfg.ActiveSeriesMetricsIdleTimeout, "ingester.active-series-metrics-idle-timeout", 10*time.Minute, "After what time a series is considered to be inactive.") f.BoolVar(&cfg.StreamChunksWhenUsingBlocks, "ingester.stream-chunks-when-using-blocks", true, "Stream chunks from ingesters to queriers.") - f.DurationVar(&cfg.ExemplarsUpdatePeriod, "ingester.exemplars-update-period", 15*time.Second, "Period with which to update per-tenant max exemplar limit.") + f.DurationVar(&cfg.TSDBConfigUpdatePeriod, "ingester.tsdb-config-update-period", 15*time.Second, "Period with which to update the per-tenant TSDB configuration.") cfg.DefaultLimits.RegisterFlags(f) @@ -413,8 +414,8 @@ func (i *Ingester) updateLoop(ctx context.Context) error { ingestionRateTicker := time.NewTicker(instanceIngestionRateTickInterval) defer ingestionRateTicker.Stop() - exemplarUpdateTicker := time.NewTicker(i.cfg.ExemplarsUpdatePeriod) - defer exemplarUpdateTicker.Stop() + tsdbUpdateTicker := time.NewTicker(i.cfg.TSDBConfigUpdatePeriod) + defer tsdbUpdateTicker.Stop() var activeSeriesTickerChan <-chan time.Time if i.cfg.ActiveSeriesMetricsEnabled { @@ -441,8 +442,8 @@ func (i *Ingester) updateLoop(ctx context.Context) error { } i.tsdbsMtx.RUnlock() - case <-exemplarUpdateTicker.C: - i.applyExemplarsSettings() + case <-tsdbUpdateTicker.C: + i.applyTSDBSettings() case <-activeSeriesTickerChan: i.updateActiveSeries(time.Now()) @@ -495,14 +496,21 @@ func (i *Ingester) updateActiveSeries(now time.Time) { } } -// Go through all tenants and apply the current max-exemplars setting. -// If it changed, tsdb will resize the buffer; if it didn't change tsdb will return quickly. -func (i *Ingester) applyExemplarsSettings() { +// applyTSDBSettings goes through all tenants and applies +// * The current max-exemplars setting. If it changed, tsdb will resize the buffer; if it didn't change tsdb will return quickly. +// * The current out-of-order time window. If it changes from 0 to >0, then a new Write-Behind-Log gets created for that tenant. +func (i *Ingester) applyTSDBSettings() { for _, userID := range i.getTSDBUsers() { globalValue := i.limits.MaxGlobalExemplarsPerUser(userID) localValue := i.limiter.convertGlobalToLocalLimit(userID, globalValue) - // We populate a Config struct with just one value, which is OK - // because Head.ApplyConfig only looks at one value. + + oooTW := i.limits.OutOfOrderTimeWindow(userID) + if oooTW < 0 { + oooTW = 0 + } + + // We populate a Config struct with just TSDB related config, which is OK + // because DB.ApplyConfig only looks at the specified config. // The other fields in Config are things like Rules, Scrape // settings, which don't apply to Head. 
cfg := promcfg.Config{ @@ -510,13 +518,16 @@ func (i *Ingester) applyExemplarsSettings() { ExemplarsConfig: &promcfg.ExemplarsConfig{ MaxExemplars: int64(localValue), }, + TSDBConfig: &promcfg.TSDBConfig{ + OutOfOrderAllowance: time.Duration(oooTW).Milliseconds(), + }, }, } - tsdb := i.getTSDB(userID) - if tsdb == nil { + db := i.getTSDB(userID) + if db == nil { continue } - if err := tsdb.db.ApplyConfig(&cfg); err != nil { + if err := db.db.ApplyConfig(&cfg); err != nil { level.Error(i.logger).Log("msg", "failed to apply config to TSDB", "user", userID, "err", err) } } @@ -599,6 +610,7 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques startAppend = time.Now() sampleOutOfBoundsCount = 0 sampleOutOfOrderCount = 0 + sampleTooOldCount = 0 newValueForTimestampCount = 0 perUserSeriesLimitCount = 0 perMetricSeriesLimitCount = 0 @@ -620,12 +632,17 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques otlog.Int("numseries", len(req.Timeseries))) } + oooTW := i.limits.OutOfOrderTimeWindow(userID) for _, ts := range req.Timeseries { // The labels must be sorted (in our case, it's guaranteed a write request // has sorted labels once hit the ingester). - // Fast path in case we only have samples and they are all out of bounds. - if minAppendTimeAvailable && len(ts.Samples) > 0 && len(ts.Exemplars) == 0 && allOutOfBounds(ts.Samples, minAppendTime) { + // Fast path in case we only have samples and they are all out of bound + // and out-of-order support is not enabled. + // TODO(jesus.vazquez) If we had too many old samples we might want to + // extend the fast path to fail early. + if oooTW <= 0 && minAppendTimeAvailable && + len(ts.Samples) > 0 && len(ts.Exemplars) == 0 && allOutOfBounds(ts.Samples, minAppendTime) { failedSamplesCount += len(ts.Samples) sampleOutOfBoundsCount += len(ts.Samples) @@ -678,6 +695,11 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques updateFirstPartial(func() error { return newIngestErrSampleOutOfOrder(model.Time(s.TimestampMs), ts.Labels) }) continue + case storage.ErrTooOldSample: + sampleTooOldCount++ + updateFirstPartial(func() error { return newIngestErrSampleTooOld(model.Time(s.TimestampMs), ts.Labels) }) + continue + case storage.ErrDuplicateSampleForTimestamp: newValueForTimestampCount++ updateFirstPartial(func() error { @@ -781,6 +803,9 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques if sampleOutOfOrderCount > 0 { validation.DiscardedSamples.WithLabelValues(sampleOutOfOrder, userID).Add(float64(sampleOutOfOrderCount)) } + if sampleTooOldCount > 0 { + validation.DiscardedSamples.WithLabelValues(sampleTooOld, userID).Add(float64(sampleTooOldCount)) + } if newValueForTimestampCount > 0 { validation.DiscardedSamples.WithLabelValues(newValueForTimestamp, userID).Add(float64(newValueForTimestampCount)) } @@ -1453,6 +1478,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { } maxExemplars := i.limiter.convertGlobalToLocalLimit(userID, i.limits.MaxGlobalExemplarsPerUser(userID)) + oooTW := time.Duration(i.limits.OutOfOrderTimeWindow(userID)) // Create a new user database db, err := tsdb.Open(udir, userLogger, tsdbPromReg, &tsdb.Options{ RetentionDuration: i.cfg.BlocksStorageConfig.TSDB.Retention.Milliseconds(), @@ -1473,8 +1499,11 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { IsolationDisabled: !i.cfg.BlocksStorageConfig.TSDB.IsolationEnabled, HeadChunksWriteQueueSize: 
i.cfg.BlocksStorageConfig.TSDB.HeadChunksWriteQueueSize, NewChunkDiskMapper: i.cfg.BlocksStorageConfig.TSDB.NewChunkDiskMapper, - AllowOverlappingQueries: i.cfg.BlocksStorageConfig.TSDB.AllowOverlappingQueries, - AllowOverlappingCompaction: false, // always false since Mimir only uploads lvl 1 compacted blocks + AllowOverlappingQueries: true, // We can have overlapping blocks from past or out-of-order enabled during runtime. + AllowOverlappingCompaction: false, // always false since Mimir only uploads lvl 1 compacted blocks + OutOfOrderAllowance: oooTW.Milliseconds(), // The unit must be same as our timestamps. + OutOfOrderCapMin: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMin), + OutOfOrderCapMax: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax), }, nil) if err != nil { return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) @@ -2081,7 +2110,11 @@ func newIngestErrSampleTimestampTooOld(timestamp model.Time, labels []mimirpb.La } func newIngestErrSampleOutOfOrder(timestamp model.Time, labels []mimirpb.LabelAdapter) error { - return newIngestErr(globalerror.SampleOutOfOrder, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and out of order samples are not allowed", timestamp, labels) + return newIngestErr(globalerror.SampleOutOfOrder, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed", timestamp, labels) +} + +func newIngestErrSampleTooOld(timestamp model.Time, labels []mimirpb.LabelAdapter) error { + return newIngestErr(globalerror.SampleTooOld, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and this sample is beyond the out-of-order time window", timestamp, labels) } func newIngestErrSampleDuplicateTimestamp(timestamp model.Time, labels []mimirpb.LabelAdapter) error { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index 6d241c534d..a112bc7a71 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -257,7 +257,7 @@ func TestIngester_Push(t *testing.T) { # TYPE cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds gauge cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="test"} 1 - # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts. + # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out-of-order exemplar ingestion failed attempts. # TYPE cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total counter cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 0 `, @@ -303,7 +303,7 @@ func TestIngester_Push(t *testing.T) { cortex_ingester_memory_series_removed_total{user="test"} 0 `, }, - "should soft fail on sample out of order": { + "should soft fail on sample out-of-order": { reqs: []*mimirpb.WriteRequest{ mimirpb.ToWriteRequest( []labels.Labels{metricLabels}, @@ -574,7 +574,7 @@ func TestIngester_Push(t *testing.T) { # TYPE cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds gauge cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="test"} 0 - # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts. + # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out-of-order exemplar ingestion failed attempts. 
# TYPE cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total counter cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 0 `, @@ -1098,7 +1098,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { } }, }, - "out of order samples": { + "out-of-order samples": { prepareConfig: func(limits *validation.Limits, instanceLimits *InstanceLimits) bool { return true }, beforeBenchmark: func(b *testing.B, ingester *Ingester, numSeriesPerRequest int) { // For each series, push a single sample with a timestamp greater than next pushes. @@ -1117,7 +1117,7 @@ func Benchmark_Ingester_PushOnError(b *testing.B) { runBenchmark: func(b *testing.B, ingester *Ingester, metrics []labels.Labels, samples []mimirpb.Sample) { expectedErr := storage.ErrOutOfOrderSample.Error() - // Push out of order samples. + // Push out-of-order samples. for n := 0; n < b.N; n++ { _, err := ingester.Push(ctx, mimirpb.ToWriteRequest(metrics, samples, nil, nil, mimirpb.API)) // nolint:errcheck @@ -5757,6 +5757,132 @@ func TestGetIgnoreSeriesLimitForMetricNamesMap(t *testing.T) { require.Equal(t, map[string]struct{}{"foo": {}, "bar": {}}, cfg.getIgnoreSeriesLimitForMetricNamesMap()) } +// Test_Ingester_OutOfOrder tests basic ingestion and query of out-of-order samples. +// It also tests if the OutOfOrderTimeWindow gets changed during runtime. +// The correctness of changed runtime is already tested in Prometheus, so we only check if the +// change is being applied here. +func Test_Ingester_OutOfOrder(t *testing.T) { + cfg := defaultIngesterTestConfig(t) + cfg.TSDBConfigUpdatePeriod = 1 * time.Second + + l := defaultLimitsTestConfig() + tenantOverride := new(TenantLimitsMock) + tenantOverride.On("ByUserID", "test").Return(nil) + override, err := validation.NewOverrides(l, tenantOverride) + require.NoError(t, err) + + setOOOTimeWindow := func(oooTW model.Duration) { + tenantOverride.ExpectedCalls = nil + tenantOverride.On("ByUserID", "test").Return(&validation.Limits{ + OutOfOrderTimeWindow: oooTW, + }) + // TSDB config is updated every second. 
+ <-time.After(1500 * time.Millisecond) + } + + i, err := prepareIngesterWithBlockStorageAndOverrides(t, cfg, override, "", nil) + require.NoError(t, err) + require.NoError(t, services.StartAndAwaitRunning(context.Background(), i)) + defer services.StopAndAwaitTerminated(context.Background(), i) //nolint:errcheck + + // Wait until it's healthy + test.Poll(t, 1*time.Second, 1, func() interface{} { + return i.lifecycler.HealthyInstancesCount() + }) + + ctx := user.InjectOrgID(context.Background(), "test") + + pushSamples := func(start, end int64, expErr bool) { + start = start * time.Minute.Milliseconds() + end = end * time.Minute.Milliseconds() + + s := labels.FromStrings(labels.MetricName, "test_1", "status", "200") + var samples []mimirpb.Sample + var lbls []labels.Labels + for ts := start; ts <= end; ts += time.Minute.Milliseconds() { + samples = append(samples, mimirpb.Sample{ + TimestampMs: ts, + Value: float64(ts), + }) + lbls = append(lbls, s) + } + + wReq := mimirpb.ToWriteRequest(lbls, samples, nil, nil, mimirpb.API) + _, err = i.Push(ctx, wReq) + if expErr { + require.Error(t, err, "should have failed on push") + require.ErrorAs(t, err, &storage.ErrTooOldSample) + } else { + require.NoError(t, err) + } + } + + verifySamples := func(start, end int64) { + start = start * time.Minute.Milliseconds() + end = end * time.Minute.Milliseconds() + + var expSamples []model.SamplePair + for ts := start; ts <= end; ts += time.Minute.Milliseconds() { + expSamples = append(expSamples, model.SamplePair{ + Timestamp: model.Time(ts), + Value: model.SampleValue(ts), + }) + } + expMatrix := model.Matrix{{ + Metric: model.Metric{"__name__": "test_1", "status": "200"}, + Values: expSamples, + }} + + req := &client.QueryRequest{ + StartTimestampMs: math.MinInt64, + EndTimestampMs: math.MaxInt64, + Matchers: []*client.LabelMatcher{ + {Type: client.EQUAL, Name: model.MetricNameLabel, Value: "test_1"}, + }, + } + + s := stream{ctx: ctx} + err = i.QueryStream(req, &s) + require.NoError(t, err) + + res, err := chunkcompat.StreamsToMatrix(model.Earliest, model.Latest, s.responses) + require.NoError(t, err) + assert.ElementsMatch(t, expMatrix, res) + } + + // Push first in-order sample at minute 100. + pushSamples(100, 100, false) + verifySamples(100, 100) + + // OOO is not enabled. So it errors out. No sample ingested. + pushSamples(90, 99, true) + verifySamples(100, 100) + + // Increasing the OOO time window. + setOOOTimeWindow(model.Duration(30 * time.Minute)) + // Now it works. + pushSamples(90, 99, false) + verifySamples(90, 100) + + // Gives an error for sample 69 since it's outside time window, but rest is ingested. + pushSamples(69, 99, true) + verifySamples(70, 100) + + // All beyond the ooo time window. None ingested. + pushSamples(50, 69, true) + verifySamples(70, 100) + + // Increase the time window again. It works. + setOOOTimeWindow(model.Duration(60 * time.Minute)) + pushSamples(50, 69, false) + verifySamples(50, 100) + + // Decrease the time window again. Same push should fail. 
+ setOOOTimeWindow(model.Duration(30 * time.Minute)) + pushSamples(50, 69, true) + verifySamples(50, 100) +} + func TestNewIngestErrMsgs(t *testing.T) { timestamp := model.Time(1575043969) metricLabelAdapters := []mimirpb.LabelAdapter{{Name: labels.MetricName, Value: "test"}} @@ -5771,7 +5897,7 @@ func TestNewIngestErrMsgs(t *testing.T) { }, "newIngestErrSampleOutOfOrder": { err: newIngestErrSampleOutOfOrder(timestamp, metricLabelAdapters), - msg: `the sample has been rejected because another sample with a more recent timestamp has already been ingested and out of order samples are not allowed (err-mimir-sample-out-of-order). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, + msg: `the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed (err-mimir-sample-out-of-order). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, }, "newIngestErrSampleDuplicateTimestamp": { err: newIngestErrSampleDuplicateTimestamp(timestamp, metricLabelAdapters), diff --git a/pkg/ingester/metrics.go b/pkg/ingester/metrics.go index 361130d7ec..a153610bdf 100644 --- a/pkg/ingester/metrics.go +++ b/pkg/ingester/metrics.go @@ -323,6 +323,8 @@ type tsdbMetrics struct { tsdbTimeRetentionCount *prometheus.Desc tsdbBlocksBytes *prometheus.Desc + tsdbOOOAppendedSamples *prometheus.Desc + checkpointDeleteFail *prometheus.Desc checkpointDeleteTotal *prometheus.Desc checkpointCreationFail *prometheus.Desc @@ -436,7 +438,7 @@ func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { []string{"operation"}, nil), tsdbOOOHistogram: prometheus.NewDesc( "cortex_ingester_tsdb_sample_out_of_order_delta_seconds", - "Delta in seconds by which a sample is considered out of order.", + "Delta in seconds by which a sample is considered out-of-order.", nil, nil), tsdbLoadedBlocks: prometheus.NewDesc( "cortex_ingester_tsdb_blocks_loaded", @@ -502,7 +504,12 @@ func newTSDBMetrics(r prometheus.Registerer) *tsdbMetrics { []string{"user"}, nil), tsdbExemplarsOutOfOrder: prometheus.NewDesc( "cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total", - "Total number of out of order exemplar ingestion failed attempts.", + "Total number of out-of-order exemplar ingestion failed attempts.", + nil, nil), + + tsdbOOOAppendedSamples: prometheus.NewDesc( + "cortex_ingester_tsdb_out_of_order_samples_appended_total", + "Total number of out-of-order samples appended.", nil, nil), memSeriesCreatedTotal: prometheus.NewDesc( @@ -565,6 +572,8 @@ func (sm *tsdbMetrics) Describe(out chan<- *prometheus.Desc) { out <- sm.tsdbExemplarLastTs out <- sm.tsdbExemplarsOutOfOrder + out <- sm.tsdbOOOAppendedSamples + out <- sm.memSeriesCreatedTotal out <- sm.memSeriesRemovedTotal } @@ -615,6 +624,8 @@ func (sm *tsdbMetrics) Collect(out chan<- prometheus.Metric) { data.SendSumOfGaugesPerUser(out, sm.tsdbExemplarLastTs, "prometheus_tsdb_exemplar_last_exemplars_timestamp_seconds") data.SendSumOfCounters(out, sm.tsdbExemplarsOutOfOrder, "prometheus_tsdb_exemplar_out_of_order_exemplars_total") + data.SendSumOfCounters(out, sm.tsdbOOOAppendedSamples, "prometheus_tsdb_head_out_of_order_samples_appended_total") + data.SendSumOfCountersPerUser(out, sm.memSeriesCreatedTotal, "prometheus_tsdb_head_series_created_total") data.SendSumOfCountersPerUser(out, sm.memSeriesRemovedTotal, "prometheus_tsdb_head_series_removed_total") } diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go 
index 2d9dddafda..0942dafb9d 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -187,7 +187,7 @@ func TestTSDBMetrics(t *testing.T) { # TYPE cortex_ingester_tsdb_reloads_total counter cortex_ingester_tsdb_reloads_total 30 - # HELP cortex_ingester_tsdb_sample_out_of_order_delta_seconds Delta in seconds by which a sample is considered out of order. + # HELP cortex_ingester_tsdb_sample_out_of_order_delta_seconds Delta in seconds by which a sample is considered out-of-order. # TYPE cortex_ingester_tsdb_sample_out_of_order_delta_seconds histogram # observations buckets # 600 @@ -225,7 +225,7 @@ func TestTSDBMetrics(t *testing.T) { cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="user2"} 1234 cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="user3"} 1234 - # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts. + # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out-of-order exemplar ingestion failed attempts. # TYPE cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total counter cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 9 @@ -241,6 +241,10 @@ func TestTSDBMetrics(t *testing.T) { cortex_ingester_tsdb_exemplar_exemplars_appended_total{user="user2"} 100 cortex_ingester_tsdb_exemplar_exemplars_appended_total{user="user3"} 100 + # HELP cortex_ingester_tsdb_out_of_order_samples_appended_total Total number of out-of-order samples appended. + # TYPE cortex_ingester_tsdb_out_of_order_samples_appended_total counter + cortex_ingester_tsdb_out_of_order_samples_appended_total 9 + # HELP cortex_ingester_tsdb_exemplar_exemplars_in_storage Number of TSDB exemplars currently in storage. # TYPE cortex_ingester_tsdb_exemplar_exemplars_in_storage gauge cortex_ingester_tsdb_exemplar_exemplars_in_storage 30 @@ -417,7 +421,7 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { # TYPE cortex_ingester_tsdb_reloads_total counter cortex_ingester_tsdb_reloads_total 30 - # HELP cortex_ingester_tsdb_sample_out_of_order_delta_seconds Delta in seconds by which a sample is considered out of order. + # HELP cortex_ingester_tsdb_sample_out_of_order_delta_seconds Delta in seconds by which a sample is considered out-of-order. # TYPE cortex_ingester_tsdb_sample_out_of_order_delta_seconds histogram # observations buckets # 600 @@ -452,7 +456,7 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="user1"} 1234 cortex_ingester_tsdb_exemplar_last_exemplars_timestamp_seconds{user="user2"} 1234 - # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out of order exemplar ingestion failed attempts. + # HELP cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total Total number of out-of-order exemplar ingestion failed attempts. # TYPE cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total counter cortex_ingester_tsdb_exemplar_out_of_order_exemplars_total 9 @@ -469,6 +473,10 @@ func TestTSDBMetricsWithRemoval(t *testing.T) { # HELP cortex_ingester_tsdb_exemplar_exemplars_in_storage Number of TSDB exemplars currently in storage. # TYPE cortex_ingester_tsdb_exemplar_exemplars_in_storage gauge cortex_ingester_tsdb_exemplar_exemplars_in_storage 20 + + # HELP cortex_ingester_tsdb_out_of_order_samples_appended_total Total number of out-of-order samples appended. 
+ # TYPE cortex_ingester_tsdb_out_of_order_samples_appended_total counter + cortex_ingester_tsdb_out_of_order_samples_appended_total 9 `)) require.NoError(t, err) } @@ -638,7 +646,7 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { tsdbOOOHistogram := promauto.With(r).NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_sample_ooo_delta", - Help: "Delta in seconds by which a sample is considered out of order.", + Help: "Delta in seconds by which a sample is considered out-of-order.", Buckets: []float64{60 * 10, 60 * 60 * 24}, // for testing: 3 buckets: 10 min, 24 hour, and inf }) tsdbOOOHistogram.Observe(7 * base) @@ -731,9 +739,21 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { exemplarsOutOfOrderTotal := promauto.With(r).NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_exemplar_out_of_order_exemplars_total", - Help: "Total number of out of order exemplar ingestion failed attempts.", + Help: "Total number of out-of-order exemplar ingestion failed attempts.", }) exemplarsOutOfOrderTotal.Add(3) + outOfOrderSamplesAppendedTotal := promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_head_out_of_order_samples_appended_total", + Help: "Total number of appended out-of-order samples.", + }) + outOfOrderSamplesAppendedTotal.Add(3) + + tooOldSamplesTotal := promauto.With(r).NewCounter(prometheus.CounterOpts{ + Name: "prometheus_tsdb_too_old_samples_total", + Help: "Total number of out-of-order samples ingestion failed attempts.", + }) + tooOldSamplesTotal.Add(3) + return r } diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index 79a60f5b81..c35a5723c3 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -164,7 +164,6 @@ type TSDBConfig struct { HeadChunksWriteQueueSize int `yaml:"head_chunks_write_queue_size" category:"experimental"` NewChunkDiskMapper bool `yaml:"new_chunk_disk_mapper" category:"experimental"` IsolationEnabled bool `yaml:"isolation_enabled" category:"advanced"` // TODO Remove in Mimir 2.3.0 - AllowOverlappingQueries bool `yaml:"allow_overlapping_queries" category:"experimental"` // Series hash cache. SeriesHashCacheMaxBytes uint64 `yaml:"series_hash_cache_max_size_bytes" category:"advanced"` @@ -178,6 +177,10 @@ type TSDBConfig struct { // How often to check for idle TSDBs for closing. DefaultCloseIdleTSDBInterval is not suitable for testing, so tests can override. CloseIdleTSDBInterval time.Duration `yaml:"-"` + + // For experimental out of order metrics support. + OutOfOrderCapMin int `yaml:"out_of_order_cap_min" category:"experimental"` + OutOfOrderCapMax int `yaml:"out_of_order_cap_max" category:"experimental"` } // RegisterFlags registers the TSDBConfig flags. @@ -207,7 +210,8 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.HeadChunksWriteQueueSize, "blocks-storage.tsdb.head-chunks-write-queue-size", 0, "The size of the write queue used by the head chunks mapper. Lower values reduce memory utilisation at the cost of potentially higher ingest latency. Value of 0 switches chunks mapper to implementation without a queue. 
This flag is only used if the new chunk disk mapper is enabled with -blocks-storage.tsdb.new-chunk-disk-mapper.") f.BoolVar(&cfg.NewChunkDiskMapper, "blocks-storage.tsdb.new-chunk-disk-mapper", false, "Temporary flag to select whether to use the new (used in upstream Prometheus) or the old (legacy) chunk disk mapper.") f.BoolVar(&cfg.IsolationEnabled, "blocks-storage.tsdb.isolation-enabled", false, "[Deprecated] Enables TSDB isolation feature. Disabling may improve performance.") - f.BoolVar(&cfg.AllowOverlappingQueries, "blocks-storage.tsdb.allow-overlapping-queries", false, "Enable querying overlapping blocks. If there are going to be overlapping blocks in the ingesters this should be enabled.") + f.IntVar(&cfg.OutOfOrderCapMin, "blocks-storage.tsdb.out-of-order-cap-min", 4, "Minimum capacity for out-of-order chunks, in samples between 0 and 255.") + f.IntVar(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", 32, "Maximum capacity for out of order chunks, in samples between 1 and 255.") } // Validate the config. diff --git a/pkg/util/globalerror/errors.go b/pkg/util/globalerror/errors.go index 00b437e433..966bc9bae1 100644 --- a/pkg/util/globalerror/errors.go +++ b/pkg/util/globalerror/errors.go @@ -55,6 +55,7 @@ const ( SampleTimestampTooOld ID = "sample-timestamp-too-old" SampleOutOfOrder ID = "sample-out-of-order" + SampleTooOld ID = "sample-too-old" SampleDuplicateTimestamp ID = "sample-duplicate-timestamp" ExemplarSeriesMissing ID = "exemplar-series-missing" diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index c5d35678e7..f53ddf30bd 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -95,6 +95,8 @@ type Limits struct { // TODO remove this with Mimir version 2.4 ActiveSeriesCustomTrackersConfigOld activeseries.CustomTrackersConfig `yaml:"active_series_custom_trackers_config" json:"active_series_custom_trackers_config" doc:"hidden"` ActiveSeriesCustomTrackersConfig activeseries.CustomTrackersConfig `yaml:"active_series_custom_trackers" json:"active_series_custom_trackers" doc:"description=Additional custom trackers for active metrics. If there are active series matching a provided matcher (map value), the count will be exposed in the custom trackers metric labeled using the tracker name (map key). Zero valued counts are not exposed (and removed when they go back to zero)." category:"advanced"` + // Max allowed time window for out-of-order samples. + OutOfOrderTimeWindow model.Duration `yaml:"out_of_order_time_window" json:"out_of_order_time_window" category:"experimental"` // Querier enforced limits. MaxChunksPerQuery int `yaml:"max_fetched_chunks_per_query" json:"max_fetched_chunks_per_query"` @@ -179,6 +181,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalMetadataPerMetric, MaxMetadataPerMetricFlag, 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") f.IntVar(&l.MaxGlobalExemplarsPerUser, "ingester.max-global-exemplars-per-user", 0, "The maximum number of exemplars in memory, across the cluster. 0 to disable exemplars ingestion.") f.Var(&l.ActiveSeriesCustomTrackersConfig, "ingester.active-series-custom-trackers", "Additional active series metrics, matching the provided matchers. Matchers should be in form :, like 'foobar:{foo=\"bar\"}'. 
Multiple matchers can be provided either providing the flag multiple times or providing multiple semicolon-separated values to a single flag.") + f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of _rate of out-of-order samples being ingested_ and _the number of series that are getting out-of-order samples_. You can configure it per tenant.") f.IntVar(&l.MaxChunksPerQuery, MaxChunksPerQueryFlag, 2e6, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") f.IntVar(&l.MaxFetchedSeriesPerQuery, MaxSeriesPerQueryFlag, 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and storage. This limit is enforced in the querier and ruler. 0 to disable") @@ -501,6 +504,11 @@ func (o *Overrides) ActiveSeriesCustomTrackersConfig(userID string) activeseries return o.getOverridesForUser(userID).ActiveSeriesCustomTrackersConfig } +// OutOfOrderTimeWindow returns the out-of-order time window for the user. +func (o *Overrides) OutOfOrderTimeWindow(userID string) model.Duration { + return o.getOverridesForUser(userID).OutOfOrderTimeWindow +} + // IngestionTenantShardSize returns the ingesters shard size for a given user. func (o *Overrides) IngestionTenantShardSize(userID string) int { return o.getOverridesForUser(userID).IngestionTenantShardSize From 567e0cc166f138aa6f6b43e07f36180934002575 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Fri, 24 Jun 2022 14:04:12 +0200 Subject: [PATCH 42/63] Updates to RELEASE.md (#2195) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add more releases, split long lines for easier editing. Signed-off-by: Peter Štibraný * Added section on creating release on GitHub with contributor stats and new contributors section. Signed-off-by: Peter Štibraný * Added part about cherry-picking with -x and squash & merge. Signed-off-by: Peter Štibraný * Move opening PR to text. Signed-off-by: Peter Štibraný * What's more in life than happy linter? Signed-off-by: Peter Štibraný * Apply feedback from review. Signed-off-by: Peter Štibraný * Fix. Signed-off-by: Peter Štibraný * Address review feedback. Signed-off-by: Peter Štibraný * Address review feedback. Signed-off-by: Peter Štibraný --- RELEASE.md | 120 +++++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 93 insertions(+), 27 deletions(-) diff --git a/RELEASE.md b/RELEASE.md index a1667cb1d1..6e751afdb1 100644 --- a/RELEASE.md +++ b/RELEASE.md @@ -10,17 +10,28 @@ A new Grafana Mimir release is cut approximately every 6 weeks. 
The following ta | ------- | ---------- | ----------------- | | 2.0.0 | 2022-03-20 | Marco Pracucci | | 2.1.0 | 2022-05-16 | Johanna Ratliff | -| 2.2.0 | 2022-06-27 | _To be announced_ | +| 2.2.0 | 2022-06-27 | Oleg Zaytsev | | 2.3.0 | 2022-08-08 | _To be announced_ | +| 2.4.0 | 2022-09-19 | _To be announced_ | +| 2.5.0 | 2022-10-31 | _To be announced_ | ## Release shepherd responsibilities -The release shepherd is responsible for an entire minor release series, meaning all pre- and patch releases of a minor release. The process formally starts with the initial pre-release, but some preparations should be made a few days in advance. - -We aim to keep the main branch in a working state at all times. In principle, it should be possible to cut a release from main at any time. In practice, things might not work out as nicely. A few days before the pre-release is scheduled, the shepherd should check the state of main. Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release. -There may be some actions left to address when cutting this release. The release shepherd is responsible for going through TODOs in the repository and verifying that nothing is that is due this release is forgotten. -On the planned release date, the release shepherd cuts the first pre-release (using the suffix `-rc.0`) and creates a new branch called `release-<major>.<minor>` starting at the commit tagged for the pre-release. In general, a pre-release is considered a release candidate (that's what `rc` stands for) and should therefore not contain any known bugs that are planned to be fixed in the final release. -With the pre-release, the release shepherd is responsible for coordinating or running the release candidate in any end user production environment for at least 1 week. This is typically done at Grafana Labs. +The release shepherd is responsible for an entire minor release series, meaning all pre- and patch releases of a minor release. +The process formally starts with the initial pre-release, but some preparations should be made a few days in advance. + +- We aim to keep the `main` branch in a working state at all times. + In principle, it should be possible to cut a release from `main` at any time. + In practice, things might not work out as nicely. + A few days before the pre-release is scheduled, the shepherd should check the state of the `main` branch. + Following their best judgement, the shepherd should try to expedite bug fixes that are still in progress but should make it into the release. + On the other hand, the shepherd may hold back merging last-minute invasive and risky changes that are better suited for the next minor release. +- There may be some actions left to address when cutting this release. + The release shepherd is responsible for going through TODOs in the repository and verifying that nothing that is due in this release is forgotten. +- On the planned release date, the release shepherd cuts the first pre-release (using the suffix `-rc.0`) and creates a new branch called `release-<major>.<minor>` starting at the commit tagged for the pre-release. In general, a pre-release is considered a release candidate (that's what `rc` stands for) and should therefore not contain any known bugs that are planned to be fixed in the final release.
+- With the pre-release, the release shepherd is responsible for coordinating or running the release candidate in any end user production environment for at least 1 week. + This is typically done at Grafana Labs. - If regressions or critical bugs are detected, they need to get fixed before cutting a new pre-release (called `-rc.1`, `-rc.2`, etc.). See the next section for details on cutting an individual release. @@ -33,13 +44,16 @@ We use [Semantic Versioning](https://semver.org/). We maintain a separate branch for each minor release, named `release-<major>.<minor>`, e.g. `release-1.1`, `release-2.0`. -The usual flow is to merge new features and changes into the main branch and to merge bug fixes into the latest release branch. Bug fixes are then merged into main from the latest release branch. The main branch should always contain all commits from the latest release branch. As long as main hasn't deviated significantly from the release branch, new commits can also go to main, followed by cherry picking them back into the release branch. +The usual flow is to merge new features and changes into the `main` branch and to merge bug fixes into the latest release branch. +Bug fixes are then merged into `main` from the latest release branch. +The `main` branch should always contain all commits from the latest release branch. +As long as `main` hasn't deviated significantly from the release branch, new commits can also go to `main`, followed by cherry-picking them back into the release branch. See [Cherry-picking changes into release branch](#cherry-picking-changes-into-release-branch). Maintaining the release branches for older minor releases happens on a best effort basis. ### Show that a release is in progress -This helps ongoing PRs to get their changes in the right place, and to consider whether they need cherry-picking. +This helps ongoing PRs to get their changes in the right place, and to consider whether they need cherry-picking into the release branch. 1. Make a PR to update `CHANGELOG.md` on main - Add a new section for the new release so that `## main / unreleased` is blank and at the top. @@ -49,41 +63,74 @@ This helps ongoing PRs to get their changes in the right place, and to consider ### Prepare your release -For a new major or minor release, create the corresponding release branch based on the main branch. For a patch release, work in the branch of the minor release you want to patch. +For a new major or minor release, create the corresponding release branch based on the main branch. +For a patch release, work in the branch of the minor release you want to patch. -To prepare a release branch, first create new release branch (release-X.Y) in the Mimir repository from the main commit of your choice, and then do the following steps on a temporary branch (prepare-release-X.Y) and make a PR to merge said branch into the new release branch (prepare-release-X.Y -> release-X.Y): +To prepare a release branch, first create a new release branch (release-X.Y) in the Mimir repository from the main commit of your choice, +and then do the following steps on a temporary branch (prepare-release-X.Y) and make a PR to merge said branch into +the new release branch (prepare-release-X.Y -> release-X.Y): 1. Make sure you've a GPG key associated with your GitHub account (`git tag` will be signed with that GPG key) - You can add a GPG key to your GitHub account following [this procedure](https://help.github.com/articles/generating-a-gpg-key/) -1. Update the version number in the `VERSION` file to say "X.Y-rc.0" +1. Update the version number in the `VERSION` file to say `X.Y-rc.0` 1. Update `CHANGELOG.md` - Ensure changelog entries for the new release are in this order: - `[CHANGE]` - `[FEATURE]` - `[ENHANCEMENT]` - `[BUGFIX]` - - Run `./tools/release/check-changelog.sh LAST-RELEASE-TAG...main` and add any missing PR which includes user-facing changes + - Run `./tools/release/check-changelog.sh LAST-RELEASE-TAG...main` and add any missing PR which includes user-facing changes. + - The `check-changelog.sh` script also reports the number of PRs and authors at the top. Note the numbers and include them in the release notes in GitHub. + +Once your release preparation PR is approved, merge it to the `release-X.Y` branch, and continue with publishing. + +### Write release notes document + +Each Grafana Mimir release comes with a release notes document that is published on the website. This document is stored in `docs/sources/release-notes/`, +and contains the following sections: -Once your release preparation PR is approved, merge it to the "release-X.Y" branch, and continue with publishing. +- Features and enhancements +- Upgrade considerations +- Bug fixes + +Please write a draft release notes PR, and get it approved by Grafana Mimir's Product Manager (or ask the PM to send a PR with the document). +Make sure that the release notes document for the new version is available from the release branch, and not just `main`. +See [PR 1848](https://github.com/grafana/mimir/pull/1848) for an example PR. ### Publish a release candidate To publish a release candidate: 1. Do not change the release branch directly; make a PR to the release-X.Y branch with VERSION and any CHANGELOG changes. - 1. Ensure the `VERSION` file has the `-rc.X` suffix (`X` starting from `0`) + 1. Ensure the `VERSION` file has the `-rc.X` suffix (`X` starting from `0`). 1. After merging your PR to the release branch, `git tag` the new release (see [How to tag a release](#how-to-tag-a-release)) from the release branch. -1. Wait until the CI pipeline succeeds (once a tag is created, the release process through GitHub Actions will be triggered for this tag) -1. Create a pre-release on GitHub - - Write the release notes (including a copy-paste of the changelog) - - Build binaries with `make BUILD_IN_CONTAINER=true dist` and attach them to the release (building in container ensures standardized toolchain) +1. Wait until the CI pipeline succeeds (once a tag is created, the release process through GitHub Actions will be triggered for this tag). +1. Create a pre-release on GitHub. See [Creating release on GitHub](#creating-release-on-github). + +### Creating release on GitHub + +1. Go to https://github.com/grafana/mimir/releases/new to start a new release on GitHub (or click "Draft a new release" at https://github.com/grafana/mimir/releases page.) +1. Select your new tag, use `Mimir <version>` as Release Title. Check that "Previous tag" next to "Generate release notes" button shows previous Mimir release. + Click "Generate release notes" button. This will pre-fill the changelog for the release. + You can delete all of it, but keep "New Contributors" section and "Full Changelog" link for later. +1. Release description consists of: + - "This release contains XX contributions from YY authors. Thank you!" at the beginning. + You can find the numbers by running `./tools/release/check-changelog.sh LAST-RELEASE-TAG...NEW-RELEASE-TAG`. + As an example, running the script with `mimir-2.0.0...mimir-2.1.0` argument reports `Found 417 PRs from 47 authors.`.
+ - After contributor stats, please include content of the release notes document [created previously](#write-release-notes-document). + - After release notes, please copy-paste content of CHANGELOG.md file since the previous release. + - After CHANGELOG, please include "New Contributors" section and "Full Changelog" link at the end. + Both were created previously by "Generate release notes" button in GitHub UI. +1. Build binaries with `make BUILD_IN_CONTAINER=true dist` and attach them to the release (building in container ensures standardized toolchain). ### Publish a stable release -> **Note:** Technical documentation is automatically published on release tags or release branches with a corresponding release tag. The workflow that publishes documentation is defined in [`publish-technical-documentation-release.yml`](.github/workflows/publish-technical-documentation-release.yml). -> To publish a stable release: +> **Note:** Technical documentation is automatically published on release tags or release branches with a corresponding +> release tag. The workflow that publishes documentation is defined in [`publish-technical-documentation-release.yml`](.github/workflows/publish-technical-documentation-release.yml). -1. Do not change the release branch directly; make a PR to the release-X.Y branch with VERSION and any CHANGELOG changes. +To publish a stable release: + +1. Do not change the release branch directly; make a PR to the `release-X.Y` branch with VERSION and any CHANGELOG changes. 1. Ensure the `VERSION` file has **no** `-rc.X` suffix 1. Update the Mimir version in the following locations: - `operations/mimir/images.libsonnet` (`_images.mimir` and `_images.query_tee` fields) @@ -94,14 +141,12 @@ To publish a release candidate: 1. Open a PR 1. After merging your PR to the release branch, `git tag` the new release (see [How to tag a release](#how-to-tag-a-release)) from the release branch. 1. Wait until the CI pipeline succeeds (once a tag is created, the release process through GitHub Actions will be triggered for this tag) -1. Create a release on GitHub - - Write the release notes (including a copy-paste of the changelog) - - Build binaries with `make BUILD_IN_CONTAINER=true dist` and attach them to the release (building in container ensures standardized toolchain) +1. Create a release on GitHub. This is basically a copy of release notes from pre-release version, with up-to-date CHANGELOG (if there were any changes in release candidates). 1. Merge the release branch `release-x.y` into `main` - Create `merge-release-X.Y-to-main` branch **from the `release-X.Y` branch** locally - Merge the upstream `main` branch into your `merge-release-X.Y-to-main` branch and resolve conflicts - Make a PR for merging your `merge-release-X.Y-to-main` branch into `main` - - Once approved, merge the PR with a "Merge" commit through one of the following strategies: + - Once approved, merge the PR with a **Merge** commit through one of the following strategies: - Temporarily enable "Allow merge commits" option in "Settings > Options" - Locally merge the `merge-release-X.Y-to-main` branch into `main`, and push the changes to `main` back to GitHub. This doesn't break `main` branch protection, since the PR has been approved already, and it also doesn't require removing the protection. 1. 
Open a PR to add the new version to the backward compatibility integration test (`integration/backward_compatibility_test.go`) @@ -115,7 +160,11 @@ To publish a release candidate: ### How to tag a release -Every release is tagged with `mimir-<major>.<minor>.<patch>`, e.g. `mimir-2.0.0`. Note the `mimir-` prefix, which we use to specifically avoid the Go compiler recognizing them as version tags. We don't want compatibility with Go's module versioning scheme, since it would require us to keep each major version's code in its own directory beneath the repository root, f.ex. v2/. We also don't provide any API backwards compatibility guarantees within a single major version. +Every release is tagged with `mimir-<major>.<minor>.<patch>`, e.g. `mimir-2.0.0`. +Note the `mimir-` prefix, which we use to specifically avoid the Go compiler recognizing them as version tags. +We don't want compatibility with Go's module versioning scheme, since it would require us to keep each major version's +code in its own directory beneath the repository root, f.ex. v2/. +We also don't provide any API backwards compatibility guarantees within a single major version. You can do the tagging on the commandline: @@ -124,3 +173,20 @@ $ version=$(< VERSION) $ git tag -s "mimir-${version}" -m "v${version}" $ git push origin "mimir-${version}" ``` + +### Cherry-picking changes into release branch + +To cherry-pick a change (commit) from `main` into a release branch, please do the following: + +```bash +$ git checkout release-X.Y # Start with the release branch +$ git checkout -b cherry-pick-pr-ZZZ # Create new branch for cherry-picking +$ git cherry-pick -x <commit> # Cherry pick the change using -x option to add original commit ID to the message +$ git push origin cherry-pick-pr-ZZZ # Push branch to GitHub. +``` + +After pushing the branch to GitHub, you can create the PR by opening the `https://github.com/grafana/mimir/pull/new/cherry-pick-pr-ZZZ` link. +Make sure to set `release-X.Y` as the base branch, into which the PR should be merged. +After the PR with the cherry-picked commit is reviewed, merge it with the standard "Squash & Merge" commit that we use in Mimir. +Keep the commit message suggested by GitHub, which is a combination of the original commit message, the original PR number, the new PR number, and the cherry-picked commit hash. +GitHub will properly attribute both you and the original commit author as contributors to this change, and will also link to the original commit in the UI. From 5e9c952bb34a7c6aebf8809bb79afd9366b32d96 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar <15064823+codesome@users.noreply.github.com> Date: Fri, 24 Jun 2022 17:48:54 +0530 Subject: [PATCH 43/63] Update to latest mimir-prometheus/main (#2227) Signed-off-by: Ganesh Vernekar --- go.mod | 2 +- go.sum | 4 +-- pkg/ingester/ingester.go | 4 +-- .../prometheus/prometheus/config/config.go | 14 ++++---- .../prometheus/prometheus/tsdb/db.go | 36 +++++++++---------- .../prometheus/prometheus/tsdb/head.go | 30 ++++++++-------- .../prometheus/prometheus/tsdb/head_append.go | 24 ++++++------- .../prometheus/prometheus/tsdb/head_wal.go | 34 +++++++++++++++--- vendor/modules.txt | 4 +-- 9 files changed, 88 insertions(+), 64 deletions(-) diff --git a/go.mod b/go.mod index 9d5cfd6292..5c683f5670 100644 --- a/go.mod +++ b/go.mod @@ -227,7 +227,7 @@ replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110 replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab // Using a fork of Prometheus while we work on querysharding to avoid a dependency on the upstream.
-replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c // Out of order Support forces us to fork thanos because we've changed the ChunkReader interface. // Once the out of order support is upstreamed and Thanos has vendored it, we can remove this override. diff --git a/go.sum b/go.sum index c4cb37a7c6..e66b9e4d05 100644 --- a/go.sum +++ b/go.sum @@ -744,8 +744,8 @@ github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe h1:mxrRWDjKtob43xF9n github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe/go.mod h1:+26VJWpczg2OU3D0537acnHSHzhJORpxOs6F+M27tZo= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167 h1:PgEQkGHR4YimSCEGT5IoswN9gJKZDVskf+he6UClCLw= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 h1:TXqXoFZweHWWTEX26PZY0RfqivxObBz5nOPU2WcnLvc= -github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= +github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c h1:oJdK/F/mW2j/dy2nKOtmcMBVnHx70mAf2tE1T2oqLPE= +github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 h1:DG++oZD7E6YUm8YNZOu7RwZ8J/Slhcx3iOlKQBY6Oh0= diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 703ba06fc4..08b6395b3e 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -519,7 +519,7 @@ func (i *Ingester) applyTSDBSettings() { MaxExemplars: int64(localValue), }, TSDBConfig: &promcfg.TSDBConfig{ - OutOfOrderAllowance: time.Duration(oooTW).Milliseconds(), + OutOfOrderTimeWindow: time.Duration(oooTW).Milliseconds(), }, }, } @@ -1501,7 +1501,7 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { NewChunkDiskMapper: i.cfg.BlocksStorageConfig.TSDB.NewChunkDiskMapper, AllowOverlappingQueries: true, // We can have overlapping blocks from past or out-of-order enabled during runtime. AllowOverlappingCompaction: false, // always false since Mimir only uploads lvl 1 compacted blocks - OutOfOrderAllowance: oooTW.Milliseconds(), // The unit must be same as our timestamps. + OutOfOrderTimeWindow: oooTW.Milliseconds(), // The unit must be same as our timestamps. OutOfOrderCapMin: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMin), OutOfOrderCapMax: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax), }, nil) diff --git a/vendor/github.com/prometheus/prometheus/config/config.go b/vendor/github.com/prometheus/prometheus/config/config.go index 701fb40d24..5af0a5ba5e 100644 --- a/vendor/github.com/prometheus/prometheus/config/config.go +++ b/vendor/github.com/prometheus/prometheus/config/config.go @@ -507,15 +507,15 @@ type StorageConfig struct { // TSDBConfig configures runtime reloadable configuration options. 
type TSDBConfig struct { - // OutOfOrderAllowance sets how long back in time an out-of-order sample can be inserted + // OutOfOrderTimeWindow sets how long back in time an out-of-order sample can be inserted // into the TSDB. This is the one finally used by the TSDB and should be in the same unit // as other timestamps in the TSDB. - OutOfOrderAllowance int64 + OutOfOrderTimeWindow int64 - // OutOfOrderAllowanceFlag holds the parsed duration from the config file. - // During unmarshall, this is converted into milliseconds and stored in OutOfOrderAllowance. - // This should not be used directly and must be converted into OutOfOrderAllowance. - OutOfOrderAllowanceFlag model.Duration `yaml:"out_of_order_allowance,omitempty"` + // OutOfOrderTimeWindowFlag holds the parsed duration from the config file. + // During unmarshall, this is converted into milliseconds and stored in OutOfOrderTimeWindow. + // This should not be used directly and must be converted into OutOfOrderTimeWindow. + OutOfOrderTimeWindowFlag model.Duration `yaml:"out_of_order_time_window,omitempty"` } // UnmarshalYAML implements the yaml.Unmarshaler interface. @@ -526,7 +526,7 @@ func (t *TSDBConfig) UnmarshalYAML(unmarshal func(interface{}) error) error { return err } - t.OutOfOrderAllowance = time.Duration(t.OutOfOrderAllowanceFlag).Milliseconds() + t.OutOfOrderTimeWindow = time.Duration(t.OutOfOrderTimeWindowFlag).Milliseconds() return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index a1177df0d1..305559e222 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -182,10 +182,10 @@ type Options struct { // If nil, the cache won't be used. SeriesHashCache *hashcache.SeriesHashCache - // OutOfOrderAllowance specifies how much out of order is allowed, if any. + // OutOfOrderTimeWindow specifies how much out of order is allowed, if any. // This can change during run-time, so this value from here should only be used // while initialising. - OutOfOrderAllowance int64 + OutOfOrderTimeWindow int64 // OutOfOrderCapMin minimum capacity for OOO chunks (in samples). // If it is <=0, the default value is assumed. 
@@ -662,7 +662,7 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.MinBlockDuration > opts.MaxBlockDuration { opts.MaxBlockDuration = opts.MinBlockDuration } - if opts.OutOfOrderAllowance > 0 { + if opts.OutOfOrderTimeWindow > 0 { opts.AllowOverlappingQueries = true } if opts.OutOfOrderCapMin <= 0 { @@ -671,8 +671,8 @@ func validateOpts(opts *Options, rngs []int64) (*Options, []int64) { if opts.OutOfOrderCapMax <= 0 { opts.OutOfOrderCapMax = DefaultOutOfOrderCapMax } - if opts.OutOfOrderAllowance < 0 { - opts.OutOfOrderAllowance = 0 + if opts.OutOfOrderTimeWindow < 0 { + opts.OutOfOrderTimeWindow = 0 } if len(rngs) == 0 { @@ -792,14 +792,14 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs if err != nil { return nil, err } - if opts.OutOfOrderAllowance > 0 { + if opts.OutOfOrderTimeWindow > 0 { wblog, err = wal.NewSize(l, r, wblDir, segmentSize, opts.WALCompression) if err != nil { return nil, err } } } - db.oooWasEnabled.Store(opts.OutOfOrderAllowance > 0) + db.oooWasEnabled.Store(opts.OutOfOrderTimeWindow > 0) headOpts := DefaultHeadOptions() headOpts.ChunkRange = rngs[0] headOpts.ChunkDirRoot = dir @@ -812,7 +812,7 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs headOpts.EnableExemplarStorage = opts.EnableExemplarStorage headOpts.MaxExemplars.Store(opts.MaxExemplars) headOpts.EnableMemorySnapshotOnShutdown = opts.EnableMemorySnapshotOnShutdown - headOpts.OutOfOrderAllowance.Store(opts.OutOfOrderAllowance) + headOpts.OutOfOrderTimeWindow.Store(opts.OutOfOrderTimeWindow) headOpts.OutOfOrderCapMin.Store(opts.OutOfOrderCapMin) headOpts.OutOfOrderCapMax.Store(opts.OutOfOrderCapMax) headOpts.NewChunkDiskMapper = opts.NewChunkDiskMapper @@ -953,32 +953,32 @@ func (db *DB) Appender(ctx context.Context) storage.Appender { } // ApplyConfig applies a new config to the DB. -// Behaviour of 'OutOfOrderAllowance' is as follows: -// OOO enabled = oooAllowance > 0. OOO disabled = oooAllowance is 0. +// Behaviour of 'OutOfOrderTimeWindow' is as follows: +// OOO enabled = oooTimeWindow > 0. OOO disabled = oooTimeWindow is 0. // 1) Before: OOO disabled, Now: OOO enabled => // * A new WBL is created for the head block. // * OOO compaction is enabled. // * Overlapping queries are enabled. // 2) Before: OOO enabled, Now: OOO enabled => -// * Only the allowance is updated. +// * Only the time window is updated. // 3) Before: OOO enabled, Now: OOO disabled => -// * Allowance set to 0. So no new OOO samples will be allowed. +// * Time Window set to 0. So no new OOO samples will be allowed. // * OOO WBL will stay and follow the usual cleanup until a restart. // * OOO Compaction and overlapping queries will remain enabled until a restart. // 4) Before: OOO disabled, Now: OOO disabled => no-op. func (db *DB) ApplyConfig(conf *config.Config) error { - oooAllowance := int64(0) + oooTimeWindow := int64(0) if conf.StorageConfig.TSDBConfig != nil { - oooAllowance = conf.StorageConfig.TSDBConfig.OutOfOrderAllowance + oooTimeWindow = conf.StorageConfig.TSDBConfig.OutOfOrderTimeWindow } - if oooAllowance < 0 { - oooAllowance = 0 + if oooTimeWindow < 0 { + oooTimeWindow = 0 } // Create WBL if it was not present and if OOO is enabled with WAL enabled. var wblog *wal.WAL var err error - if !db.oooWasEnabled.Load() && oooAllowance > 0 && db.opts.WALSegmentSize >= 0 { + if !db.oooWasEnabled.Load() && oooTimeWindow > 0 && db.opts.WALSegmentSize >= 0 { segmentSize := wal.DefaultSegmentSize // Wal is set to a custom size. 
if db.opts.WALSegmentSize > 0 { @@ -994,7 +994,7 @@ func (db *DB) ApplyConfig(conf *config.Config) error { db.head.ApplyConfig(conf, wblog) if !db.oooWasEnabled.Load() { - db.oooWasEnabled.Store(oooAllowance > 0) + db.oooWasEnabled.Store(oooTimeWindow > 0) } return nil } diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head.go b/vendor/github.com/prometheus/prometheus/tsdb/head.go index aef7dde308..a8d7896ed1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head.go @@ -150,7 +150,7 @@ type HeadOptions struct { ChunkWriteBufferSize int ChunkEndTimeVariance float64 ChunkWriteQueueSize int - OutOfOrderAllowance atomic.Int64 + OutOfOrderTimeWindow atomic.Int64 OutOfOrderCapMin atomic.Int64 OutOfOrderCapMax atomic.Int64 @@ -214,11 +214,11 @@ func NewHead(r prometheus.Registerer, l log.Logger, wal, wbl *wal.WAL, opts *Hea l = log.NewNopLogger() } - if opts.OutOfOrderAllowance.Load() < 0 { - opts.OutOfOrderAllowance.Store(0) + if opts.OutOfOrderTimeWindow.Load() < 0 { + opts.OutOfOrderTimeWindow.Store(0) } - // Allowance can be set on runtime. So the capMin and capMax should be valid + // Time window can be set on runtime. So the capMin and capMax should be valid // even if ooo is not enabled yet. capMin, capMax := opts.OutOfOrderCapMin.Load(), opts.OutOfOrderCapMax.Load() if capMin > 255 { @@ -429,7 +429,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }), tooOldSamples: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_too_old_samples_total", - Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of allowance.", + Help: "Total number of out of order samples ingestion failed attempts with out of support enabled, but sample outside of time window.", }), headTruncateFail: prometheus.NewCounter(prometheus.CounterOpts{ Name: "prometheus_tsdb_head_truncations_failed_total", @@ -465,7 +465,7 @@ func newHeadMetrics(h *Head, r prometheus.Registerer) *headMetrics { }), oooHistogram: prometheus.NewHistogram(prometheus.HistogramOpts{ Name: "prometheus_tsdb_sample_ooo_delta", - Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO allowance and whether sample is accepted or not).", + Help: "Delta in seconds by which a sample is considered out of order (reported regardless of OOO time window and whether sample is accepted or not).", Buckets: []float64{ // Note that mimir distributor only gives us a range of wallclock-12h to wallclock+15min 60 * 10, // 10 min @@ -882,15 +882,15 @@ func (h *Head) removeCorruptedMmappedChunks(err error) (map[chunks.HeadSeriesRef } func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) { - oooAllowance := int64(0) + oooTimeWindow := int64(0) if cfg.StorageConfig.TSDBConfig != nil { - oooAllowance = cfg.StorageConfig.TSDBConfig.OutOfOrderAllowance + oooTimeWindow = cfg.StorageConfig.TSDBConfig.OutOfOrderTimeWindow } - if oooAllowance < 0 { - oooAllowance = 0 + if oooTimeWindow < 0 { + oooTimeWindow = 0 } - h.SetOutOfOrderAllowance(oooAllowance, wbl) + h.SetOutOfOrderTimeWindow(oooTimeWindow, wbl) if !h.opts.EnableExemplarStorage { return @@ -911,14 +911,14 @@ func (h *Head) ApplyConfig(cfg *config.Config, wbl *wal.WAL) { level.Info(h.logger).Log("msg", "Exemplar storage resized", "from", prevSize, "to", newSize, "migrated", migrated) } -// SetOutOfOrderAllowance updates the out of order related parameters. 
+// SetOutOfOrderTimeWindow updates the out of order related parameters. // If the Head already has a WBL set, then the wbl will be ignored. -func (h *Head) SetOutOfOrderAllowance(oooAllowance int64, wbl *wal.WAL) { - if oooAllowance > 0 && h.wbl == nil { +func (h *Head) SetOutOfOrderTimeWindow(oooTimeWindow int64, wbl *wal.WAL) { + if oooTimeWindow > 0 && h.wbl == nil { h.wbl = wbl } - h.opts.OutOfOrderAllowance.Store(oooAllowance) + h.opts.OutOfOrderTimeWindow.Store(oooTimeWindow) } // PostingsCardinalityStats returns top 10 highest cardinality stats By label and value names. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go index b16cb201d7..6f66b169e1 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_append.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_append.go @@ -253,8 +253,8 @@ type headAppender struct { func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64, v float64) (storage.SeriesRef, error) { // For OOO inserts, this restriction is irrelevant and will be checked later once we confirm the sample is an in-order append. // If OOO inserts are disabled, we may as well as check this as early as we can and avoid more work. - oooAllowance := a.head.opts.OutOfOrderAllowance.Load() - if oooAllowance == 0 && t < a.minValidTime { + oooTimeWindow := a.head.opts.OutOfOrderTimeWindow.Load() + if oooTimeWindow == 0 && t < a.minValidTime { a.head.metrics.outOfBoundSamples.Inc() return 0, storage.ErrOutOfBounds } @@ -288,7 +288,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 s.Lock() // TODO: if we definitely know at this point that the sample is ooo, then optimise // to skip that sample from the WAL and write only in the WBL. - _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, oooAllowance) + _, delta, err := s.appendable(t, v, a.headMaxt, a.minValidTime, oooTimeWindow) if err == nil { s.pendingCommit = true } @@ -325,7 +325,7 @@ func (a *headAppender) Append(ref storage.SeriesRef, lset labels.Labels, t int64 // appendable checks whether the given sample is valid for appending to the series. (if we return false and no error) // The sample belongs to the out of order chunk if we return true and no error. // An error signifies the sample cannot be handled. -func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooAllowance int64) (isOutOfOrder bool, delta int64, err error) { +func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooTimeWindow int64) (isOutOfOrder bool, delta int64, err error) { msMaxt := s.maxTime() if msMaxt == math.MinInt64 { // The series has no sample and was freshly created. @@ -334,7 +334,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooAl return false, 0, nil } - // We cannot append it in the in-order head. So we check the oooAllowance + // We cannot append it in the in-order head. So we check the oooTimeWindow // w.r.t. the head's maxt. // -1 because for the first sample in the Head, headMaxt will be equal to t. 
msMaxt = headMaxt - 1 @@ -344,8 +344,8 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooAl return false, 0, nil } - if t < msMaxt-oooAllowance { - if oooAllowance > 0 { + if t < msMaxt-oooTimeWindow { + if oooTimeWindow > 0 { return true, msMaxt - t, storage.ErrTooOldSample } if t < minValidTime { @@ -355,7 +355,7 @@ func (s *memSeries) appendable(t int64, v float64, headMaxt, minValidTime, oooAl } if t != msMaxt || s.head() == nil { - // Sample is ooo and within allowance OR series has no active chunk to check for duplicate sample. + // Sample is ooo and within time window OR series has no active chunk to check for duplicate sample. return true, msMaxt - t, nil } @@ -503,9 +503,9 @@ func (a *headAppender) Commit() (err error) { var ( samplesAppended = len(a.samples) - oooAccepted int // number of samples out of order but accepted: with ooo enabled and within allowance + oooAccepted int // number of samples out of order but accepted: with ooo enabled and within time window oooRejected int // number of samples rejected due to: out of order but OOO support disabled. - tooOldRejected int // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside allowance) + tooOldRejected int // number of samples rejected due to: that are out of order but too old (OOO support enabled, but outside time window) oobRejected int // number of samples rejected due to: out of bounds: with t < minValidTime (OOO support disabled) inOrderMint int64 = math.MaxInt64 inOrderMaxt int64 = math.MinInt64 @@ -554,12 +554,12 @@ func (a *headAppender) Commit() (err error) { wblSamples = nil oooMmapMarkers = nil } - oooAllowance := a.head.opts.OutOfOrderAllowance.Load() + oooTimeWindow := a.head.opts.OutOfOrderTimeWindow.Load() for i, s := range a.samples { series = a.sampleSeries[i] series.Lock() - oooSample, delta, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, oooAllowance) + oooSample, delta, err := series.appendable(s.T, s.V, a.headMaxt, a.minValidTime, oooTimeWindow) switch err { case storage.ErrOutOfOrderSample: samplesAppended-- diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index 1a78f01791..e92bcc8b88 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -357,6 +357,19 @@ func (h *Head) resetSeriesWithMMappedChunks(mSeries *memSeries, mmc, oooMmc []*m mSeries.mmMaxTime = mmc[len(mmc)-1].maxTime h.updateMinMaxTime(mmc[0].minTime, mSeries.mmMaxTime) } + if len(oooMmc) != 0 { + // mint and maxt can be in any chunk, they are not sorted. + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) + for _, ch := range oooMmc { + if ch.minTime < mint { + mint = ch.minTime + } + if ch.maxTime > maxt { + maxt = ch.maxTime + } + } + h.updateMinOOOMaxOOOTime(mint, maxt) + } // Any samples replayed till now would already be compacted. Resetting the head chunk. 
// We do not reset oooHeadChunk because that is being replayed from a different WAL @@ -497,7 +510,7 @@ func (h *Head) loadWbl(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H processors[i].setup() go func(wp *wblSubsetProcessor) { - unknown := wp.processWALSamples(h) + unknown := wp.processWBLSamples(h) unknownRefs.Add(unknown) wg.Done() }(&processors[i]) @@ -679,14 +692,14 @@ func (wp *wblSubsetProcessor) reuseBuf() []record.RefSample { return nil } -// processWALSamples adds the samples it receives to the head and passes +// processWBLSamples adds the samples it receives to the head and passes // the buffer received to an output channel for reuse. // Samples before the minValidTime timestamp are discarded. -func (wp *wblSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) { +func (wp *wblSubsetProcessor) processWBLSamples(h *Head) (unknownRefs uint64) { defer close(wp.output) // We don't check for minValidTime for ooo samples. - + mint, maxt := int64(math.MaxInt64), int64(math.MinInt64) for samples := range wp.input { wp.mx.Lock() for _, s := range samples { @@ -695,15 +708,26 @@ func (wp *wblSubsetProcessor) processWALSamples(h *Head) (unknownRefs uint64) { unknownRefs++ continue } - if _, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper); chunkCreated { + ok, chunkCreated, _ := ms.insert(s.T, s.V, h.chunkDiskMapper) + if chunkCreated { h.metrics.chunksCreated.Inc() h.metrics.chunks.Inc() } + if ok { + if s.T < mint { + mint = s.T + } + if s.T > maxt { + maxt = s.T + } + } } wp.mx.Unlock() wp.output <- samples } + h.updateMinOOOMaxOOOTime(mint, maxt) + return unknownRefs } diff --git a/vendor/modules.txt b/vendor/modules.txt index 6d57f111d4..376c1334ef 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -717,7 +717,7 @@ github.com/prometheus/node_exporter/https github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220308163432-03831554a519 => github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 +# github.com/prometheus/prometheus v1.8.2-0.20220308163432-03831554a519 => github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c ## explicit; go 1.17 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1227,7 +1227,7 @@ gopkg.in/yaml.v2 gopkg.in/yaml.v3 # git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220622114521-df59320886e0 +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c # github.com/thanos-io/thanos => github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 # github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0 # github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2 From 9bce501eaccf2439bb71423e806a2b1078700516 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Fri, 24 Jun 2022 15:09:41 +0200 Subject: [PATCH 44/63] Update runbooks to mention possibility to investigate memberlist KV store in various alerts (#2158) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Update runbooks to mention possibility to investigate memberlist KV store in various alerts. 
Signed-off-by: Peter Štibraný * CHANGELOG.md Signed-off-by: Peter Štibraný * Apply feedback from review. Signed-off-by: Peter Štibraný * Make linter happy :whale: Signed-off-by: Peter Štibraný * Make linter happy :goat: Signed-off-by: Peter Štibraný --- CHANGELOG.md | 1 + .../operators-guide/mimir-runbooks/_index.md | 19 +++++++++++++------ 2 files changed, 14 insertions(+), 6 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 4e7355b12d..c548fcbe51 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -110,6 +110,7 @@ * [ENHANCEMENT] Clarify "Set rule group" API specification. #1869 * [ENHANCEMENT] Published Mimir jsonnet documentation. #2024 * [ENHANCEMENT] Documented required scrape interval for using alerting and recording rules from Mimir jsonnet. #2147 +* [ENHANCEMENT] Runbooks: Mention memberlist as possible source of problems for various alerts. #2158 * [ENHANCEMENT] Documented how to configure queriers’ autoscaling with Jsonnet. #2128 * [BUGFIX] Fixed ruler configuration used in the getting started guide. #2052 * [BUGFIX] Fixed Mimir Alertmanager datasource in Grafana used by "Play with Grafana Mimir" tutorial. #2115 diff --git a/docs/sources/operators-guide/mimir-runbooks/_index.md b/docs/sources/operators-guide/mimir-runbooks/_index.md index cac755bd0a..179b7f96ec 100644 --- a/docs/sources/operators-guide/mimir-runbooks/_index.md +++ b/docs/sources/operators-guide/mimir-runbooks/_index.md @@ -265,6 +265,7 @@ How to **investigate**: - If the failing service is going OOM (`OOMKilled`): scale up or increase the memory - If the failing service is crashing / panicking: look for the stack trace in the logs and investigate from there - If crashing service is query-frontend, querier or store-gateway, and you have "activity tracker" feature enabled, look for `found unfinished activities from previous run` message and subsequent `activity` messages in the log file to see which queries caused the crash. +- When using Memberlist as KV store for hash rings, ensure that Memberlist is working correctly. See instructions for [`MimirGossipMembersMismatch`](#MimirGossipMembersMismatch) alert. #### Alertmanager @@ -296,6 +297,8 @@ More information: This alert occurs when a ruler is unable to validate whether or not it should claim ownership over the evaluation of a rule group. The most likely cause is that one of the rule ring entries is unhealthy. If this is the case proceed to the ring admin http page and forget the unhealth ruler. The other possible cause would be an error returned the ring client. If this is the case look into debugging the ring based on the in-use backend implementation. +When using Memberlist as KV store for hash rings, ensure that Memberlist is working correctly. See instructions for [`MimirGossipMembersMismatch`](#MimirGossipMembersMismatch) alert. + ### MimirRulerTooManyFailedPushes This alert fires when rulers cannot push new samples (result of rule evaluation) to ingesters. @@ -306,6 +309,7 @@ This alert fires only for first kind of problems, and not for problems caused by How to **fix** it: - Investigate the ruler logs to find out the reason why ruler cannot write samples. Note that ruler logs all push errors, including "user errors", but those are not causing the alert to fire. Focus on problems with ingesters. +- When using Memberlist as KV store for hash rings, ensure that Memberlist is working correctly. See instructions for [`MimirGossipMembersMismatch`](#MimirGossipMembersMismatch) alert. 
### MimirRulerTooManyFailedQueries @@ -319,6 +323,7 @@ How to **fix** it: - Investigate the ruler logs to find out the reason why ruler cannot evaluate queries. Note that ruler logs rule evaluation errors even for "user errors", but those are not causing the alert to fire. Focus on problems with ingesters or store-gateways. - In case remote operational mode is enabled the problem could be at any of the ruler query path components (ruler-query-frontend, ruler-query-scheduler and ruler-querier). Check the `Mimir / Remote ruler reads` and `Mimir / Remote ruler reads resources` dashboards to find out in which Mimir service the error is being originated. +- When using Memberlist as KV store for hash rings, ensure that Memberlist is working correctly. See instructions for [`MimirGossipMembersMismatch`](#MimirGossipMembersMismatch) alert. ### MimirRulerMissedEvaluations @@ -761,12 +766,12 @@ This alert fires when any instance does not register all other instances as memb How it **works**: -- This alert applies when memberlist is used for the ring backing store. +- This alert applies when memberlist is used as KV store for hash rings. - All Mimir instances using the ring, regardless of type, join a single memberlist cluster. -- Each instance (=memberlist cluster member) should be able to see all others. +- Each instance (ie. memberlist cluster member) should see all memberlist cluster members. - Therefore the following should be equal for every instance: - The reported number of cluster members (`memberlist_client_cluster_members_count`) - - The total number of currently responsive instances. + - The total number of currently responsive instances that use memberlist KV store for hash ring. How to **investigate**: @@ -783,7 +788,7 @@ How to **investigate**: - `memberlist_tcp_transport_packets_sent_errors_total` - `memberlist_tcp_transport_packets_received_errors_total` - These errors (and others) can be found by searching for messages prefixed with `TCPTransport:`. -- Logs coming directly from memberlist are also logged by Mimir; they may indicate where to investigate further. These can be identified as such due to being tagged with `caller=memberlist_logger.go:xyz`. +- Logs coming directly from memberlist are also logged by Mimir; they may indicate where to investigate further. These can be identified as such due to being tagged with `caller=memberlist_logger.go:`. ### EtcdAllocatingTooMuchMemory @@ -831,11 +836,12 @@ This alert is fired when the multi-tenant alertmanager has been unable to check When the alertmanager loads its configuration on start up, when it polls for config changes or when there is a ring change it must check the ring to see if the tenant is still owned on this shard. To prevent one error from causing the loading of all configurations to fail we assume that on error the tenant is NOT owned for this shard. If checking the ring continues to fail then some tenants might not be assigned an alertmanager and might not be able to receive notifications for their alerts. -The metric for this alert is cortex_alertmanager_ring_check_errors_total. +The metric for this alert is `cortex_alertmanager_ring_check_errors_total`. How to **investigate**: -Look at the error message that is logged and attempt to understand what is causing the failure. In most cases the error will be encountered when attempting to read from the ring, which can fail if there is an issue with in-use backend implementation. 
+- Look at the error message that is logged and attempt to understand what is causing the failure. In most cases the error will be encountered when attempting to read from the ring, which can fail if there is an issue with in-use backend implementation. +- When using Memberlist as KV store for hash rings, ensure that Memberlist is working correctly. See instructions for [`MimirGossipMembersMismatch`](#MimirGossipMembersMismatch) alert. ### MimirAlertmanagerPartialStateMergeFailing @@ -920,6 +926,7 @@ How to **investigate**: ### MimirKVStoreFailure This alert fires if a Mimir instance is failing to run any operation on a KV store (eg. consul or etcd). +When using Memberlist as KV store for hash rings, all read and update operations work on a local copy of the hash ring, and will never fail and raise this alert. How it **works**: From e181fa27969ce9c513d85d10aee82e513eb194a6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Fri, 24 Jun 2022 15:51:59 +0200 Subject: [PATCH 45/63] Article about migrating from Consul to memberlist. Added documentation for /memberlist endpoint. (#2166) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Article about migrating from Consul to memberlist. Signed-off-by: Peter Štibraný * CHANGELOG.md Signed-off-by: Peter Štibraný * Lint Signed-off-by: Peter Štibraný * Apply review feedback, add documentation for /memberlist admin page. Signed-off-by: Peter Štibraný * Mention that /memberlist admin page is now documented in API docs. Signed-off-by: Peter Štibraný * Explain queries. Signed-off-by: Peter Štibraný * make doc * Fix links. Signed-off-by: Peter Štibraný * Apply review feedback. Signed-off-by: Peter Štibraný --- CHANGELOG.md | 2 + .../migrating-from-consul-to-memberlist.md | 142 ++++++++++++++++++ .../reference-http-api/index.md | 13 ++ operations/mimir/memberlist.libsonnet | 18 +-- 4 files changed, 158 insertions(+), 17 deletions(-) create mode 100644 docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/migrating-from-consul-to-memberlist.md diff --git a/CHANGELOG.md b/CHANGELOG.md index c548fcbe51..6eb02aeeec 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -111,6 +111,8 @@ * [ENHANCEMENT] Published Mimir jsonnet documentation. #2024 * [ENHANCEMENT] Documented required scrape interval for using alerting and recording rules from Mimir jsonnet. #2147 * [ENHANCEMENT] Runbooks: Mention memberlist as possible source of problems for various alerts. #2158 +* [ENHANCEMENT] Added step-by-step article about migrating from Consul to Memberlist KV store using jsonnet without downtime. #2166 +* [ENHANCEMENT] Documented `/memberlist` admin page. #2166 * [ENHANCEMENT] Documented how to configure queriers’ autoscaling with Jsonnet. #2128 * [BUGFIX] Fixed ruler configuration used in the getting started guide. #2052 * [BUGFIX] Fixed Mimir Alertmanager datasource in Grafana used by "Play with Grafana Mimir" tutorial. 
#2115 diff --git a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/migrating-from-consul-to-memberlist.md b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/migrating-from-consul-to-memberlist.md new file mode 100644 index 0000000000..ac162c1a60 --- /dev/null +++ b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/migrating-from-consul-to-memberlist.md @@ -0,0 +1,142 @@ +--- +title: "Migrating from Consul to Memberlist KV store for hash rings without downtime" +menuTitle: "Migrating from Consul to Memberlist" +description: "Learn how to migrate from using Consul as KV store for hash rings to using memberlist without any downtime." +weight: 40 +--- + +# Migrating from Consul to memberlist KV store for hash rings without downtime + +Mimir Jsonnet uses memberlist as KV store for hash rings since Mimir 2.2.0. + +Memberlist can be disabled by using the following configuration: + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: false + } +} +``` + +If you are running Mimir hash rings with Consul and would like to migrate to memberlist without any downtime, you can follow instructions in this document. + +## Step 1: Enable memberlist and multi KV store. + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: true, + multikv_migration_enabled: true, + } +} +``` + +Step 1 configures components to use `multi` KV store, with `consul` as primary and memberlist as secondary stores. +This step requires rollout of all Mimir components. +After applying this step all Mimir components will expose [`/memberlist`]({{< relref "../../reference-http-api/index.md#memberlist-cluster" >}}) page on HTTP admin interface, which can be used to check health of memberlist cluster. + +## Step 2: Enable KV store mirroring + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: true, + multikv_migration_enabled: true, + multikv_mirror_enabled: true, // Changed in this step. + } +} +``` + +In this step we enable writes to primary KV store (Consul) to be mirrored into secondary store (memberlist). +Applying this change will not cause restart of Mimir components. + +You can monitor following metrics to check if mirroring was enabled on all components and if it works correctly: + +- `cortex_multikv_mirror_enabled` – shows which components have KV store mirroring enabled. All Mimir components should start mirroring to secondary KV store reloading runtime configuration. +- `rate(cortex_multikv_mirror_writes_total[1m])` – shows rate of writes to secondary KV store in writes per second. +- `rate(cortex_multikv_mirror_write_errors_total[1m])` – shows rate of write errors to secondary KV store, in errors per second. + +After mirroring is enabled, you should see a key for each Mimir hash ring in the [Memberlist cluster information]({{< relref "../../reference-http-api/index.md#memberlist-cluster" >}}) admin page. +See [list of components that use hash ring]({{< relref "../../architecture/hash-ring/index.md" >}}). + +## Step 3: Switch Primary and Secondary store + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: true, + multikv_migration_enabled: true, + multikv_mirror_enabled: true, + multikv_switch_primary_secondary: true, // Changed in this step. + } +} +``` + +This change will switch primary and secondary stores as used by `multi` KV. +From this point on Mimir components will use memberlist as primary KV store, and they will mirror updates to Consul. +This step does not require restart of Mimir components. 
+ +To see if all components started to use memberlist as primary store, please watch `cortex_multikv_primary_store` metric. + +## Step 4: Disable mirroring to Consul + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: true, + multikv_migration_enabled: true, + multikv_mirror_enabled: false, // Changed in this step. + multikv_switch_primary_secondary: true, + } +} +``` + +This step does not require restart of any Mimir component. After applying the change components will stop writing ring updates to Consul, and will only use memberlist. +You can watch `cortex_multikv_mirror_enabled` metric to see if all components have picked up updated configuration. + +## Step 5: Disable `multi` KV Store + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: true, + multikv_migration_enabled: false, // Changed in this step. + multikv_mirror_enabled: false, + multikv_switch_primary_secondary: true, + multikv_migration_teardown: true, // Added in this step. + } +} +``` + +This configuration change will cause a new rollout of all components. +After the restart components will no longer use `multi` KV store and will be configured to use memberlist only. +We use `multikv_migration_teardown` to preserve runtime configuration for `multi` KV store for components that haven't restarted yet. + +All `cortex_multikv_*` metrics are only exposed by components that use `multi` KV store. As components restart, these metrics will disappear. + +> **Note**: setting `multikv_migration_enabled: false` while keeping `memberlist_ring_enabled: true` will also remove Consul! That's expected, since Consul is not used anymore – mirroring to it was disabled in step 4. + +If you need to keep consul running, you can explicitly set `consul_enabled: true` in `_config`. + +## Step 6: Cleanup + +We have successfully migrated Mimir cluster from using Consul to memberlist without any downtime! +As a final step, we can remove all migration-related config options: + +- `multikv_migration_enabled` +- `multikv_mirror_enabled` +- `multikv_switch_primary_secondary` +- `multikv_migration_teardown` + +Our final memberlist configuration will be: + +```jsonnet +{ + _config+:: { + memberlist_ring_enabled: true, + } +} +``` + +This will not trigger new restart of Mimir components. After applying this change, you are finished. diff --git a/docs/sources/operators-guide/reference-http-api/index.md b/docs/sources/operators-guide/reference-http-api/index.md index a8f6d0c319..b2240f4eb7 100644 --- a/docs/sources/operators-guide/reference-http-api/index.md +++ b/docs/sources/operators-guide/reference-http-api/index.md @@ -32,6 +32,7 @@ This document groups API endpoints by service. Note that the API endpoints are e | [Pprof](#pprof) | _All services_ | `GET /debug/pprof` | | [Fgprof](#fgprof) | _All services_ | `GET /debug/fgprof` | | [Build information](#build-information) | _All services_ | `GET /api/v1/status/buildinfo` | +| [Memberlist cluster](#memberlist-cluster) | _All services_ | `GET /memberlist` | | [Remote write](#remote-write) | Distributor | `POST /api/v1/push` | | [Tenants stats](#tenants-stats) | Distributor | `GET /distributor/all_user_stats` | | [HA tracker status](#ha-tracker-status) | Distributor | `GET /distributor/ha_tracker` | @@ -208,6 +209,18 @@ GET /api/v1/status/buildinfo This endpoint returns in JSON format information about the build and enabled features. 
The format returned is not identical, but is similar to the [Prometheus Build Information endpoint](https://prometheus.io/docs/prometheus/latest/querying/api/#build-information). +### Memberlist cluster + +``` +GET /memberlist +``` + +This admin page shows information about Memberlist cluster (list of nodes and their health) and KV store (keys and values in the KV store). + +If memberlist message history is enabled, this page also shows all received and sent messages stored in the buffers. +This can be useful for troubleshooting memberlist cluster. +To enable message history buffers use `-memberlist.message-history-buffer-bytes` CLI flag or the corresponding YAML configuration parameter. + ## Distributor The following endpoints relate to the [distributor]({{< relref "../architecture/components/distributor.md" >}}). diff --git a/operations/mimir/memberlist.libsonnet b/operations/mimir/memberlist.libsonnet index 26170f77ef..63a4b3879f 100644 --- a/operations/mimir/memberlist.libsonnet +++ b/operations/mimir/memberlist.libsonnet @@ -19,23 +19,7 @@ // but "primary" KV depends on value of multikv_primary. memberlist_ring_enabled: true, - // Migrating from consul to memberlist is a multi-step process: - // - // 1) Enable memberlist_ring_enabled=true and multikv_migration_enabled=true, restart components. - // - // 2) Set multikv_mirror_enabled=true. This doesn't require restart. - // - // 3) Set multikv_switch_primary_secondary=true. This doesn't require restart. From this point on components use memberlist as primary KV store! - // - // 4) Set multikv_mirror_enabled=false. Stop mirroring writes to Consul. Doesn't require restart. - // - // 5) Set multikv_migration_enabled=false and multikv_migration_teardown=true. This requires a restart. - // After restart components will only use memberlist. Using multikv_migration_teardown=true guarantees that runtime config - // with multi KV configuration is preserved for components that haven't restarted yet. - // - // Note: this also removes Consul. That's fine, because it's not used anymore (mirroring to it was disabled in step 4). - // - // 6) Set multikv_migration_teardown=false. This step removes runtime configuration for multi KV. It doesn't require a restart of components. + // To migrate from Consul to Memberlist check "Migrating from Consul to Memberlist KV store for hash rings" article in Mimir documentation. multikv_migration_enabled: false, // Enable multi KV. multikv_migration_teardown: false, // If multikv_migration_enabled=false and multikv_migration_teardown=true, runtime configuration for multi KV is preserved. multikv_switch_primary_secondary: false, // Switch primary and secondary KV stores in runtime configuration for multi KV. 
From 00bc6b8a8e7591ba9e3f3230fc88c0b7485196ae Mon Sep 17 00:00:00 2001 From: Dimitar Dimitrov Date: Fri, 24 Jun 2022 16:35:57 +0200 Subject: [PATCH 46/63] helm: meta-monitoring (#2068) * Add meta-monitoring Signed-off-by: Dimitar Dimitrov --- .github/workflows/helm-ci.yml | 1 + .github/workflows/test-build-deploy.yml | 2 +- .../contributing-to-helm-chart.md | 8 ++ .../charts/mimir-distributed/CHANGELOG.md | 3 +- .../helm/charts/mimir-distributed/Chart.lock | 7 +- .../helm/charts/mimir-distributed/Chart.yaml | 6 +- .../helm/charts/mimir-distributed/README.md | 1 + .../templates/metamonitoring/_helpers.tpl | 49 +++++++++++ .../grafana-agent-cluster-role-binding.yaml | 19 +++++ .../grafana-agent-cluster-role.yaml | 39 +++++++++ .../grafana-agent-service-account.yaml | 11 +++ .../metamonitoring/grafana-agent.yaml | 31 +++++++ .../kube-state-metrics-servmon.yaml | 44 ++++++++++ .../kubelet-cadvisor-servmon.yaml | 83 +++++++++++++++++++ .../logs-instance-usernames-secret.yaml | 17 ++++ .../metamonitoring/logs-instance.yaml | 38 +++++++++ .../metrics-instance-usernames-secret.yaml | 17 ++++ .../metamonitoring/metrics-instance.yaml | 38 +++++++++ .../templates/metamonitoring/pod-logs.yaml | 45 ++++++++++ .../overrides-exporter-servmon.yaml | 3 + .../mimir-distributed/templates/validate.yaml | 21 +++++ .../helm/charts/mimir-distributed/values.yaml | 79 ++++++++++++++++++ operations/helm/ct.yaml | 1 + operations/helm/tests/build.sh | 5 +- 24 files changed, 562 insertions(+), 6 deletions(-) create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role-binding.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-service-account.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance-usernames-secret.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance-usernames-secret.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/metamonitoring/pod-logs.yaml create mode 100644 operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-servmon.yaml diff --git a/.github/workflows/helm-ci.yml b/.github/workflows/helm-ci.yml index 10fb765d25..3fdd286951 100644 --- a/.github/workflows/helm-ci.yml +++ b/.github/workflows/helm-ci.yml @@ -12,3 +12,4 @@ jobs: with: ct_configfile: operations/helm/ct.yaml ct_check_version_increment: false + helm_version: v3.8.2 diff --git a/.github/workflows/test-build-deploy.yml b/.github/workflows/test-build-deploy.yml index 4a1617a7e6..2db7268532 100644 --- a/.github/workflows/test-build-deploy.yml +++ b/.github/workflows/test-build-deploy.yml @@ -87,7 +87,7 @@ jobs: - name: Set up Helm 
uses: azure/setup-helm@v1 with: - version: v3.5.2 + version: v3.8.2 - name: Check Helm Tests run: make BUILD_IN_CONTAINER=false check-helm-tests diff --git a/docs/internal/contributing/contributing-to-helm-chart.md b/docs/internal/contributing/contributing-to-helm-chart.md index 119e31bb4e..967e6b6fc9 100644 --- a/docs/internal/contributing/contributing-to-helm-chart.md +++ b/docs/internal/contributing/contributing-to-helm-chart.md @@ -25,3 +25,11 @@ If version increase is need, the version is set in the chart itself [operations/ Once a PR that updates the chart version is merged to `main`, it takes a couple of minutes for it to be published in [https://grafana.github.io/helm-charts](https://grafana.github.io/helm-charts) Helm repository. In order to search, template, install, upgrade, etc beta versions of charts, Helm commands require the user to specify the `--devel` flag. This means that checking for whether the beta version is published should be done with `helm search repo --devel`. + +## Linting + +Install [ct](https://github.com/helm/chart-testing) and run + +```bash +ct lint --config operations/helm/ct.yaml --charts operations/helm/charts/mimir-distributed +``` diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index 7e9be5b5fe..35bfb6bec2 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -29,6 +29,7 @@ Entries should include a reference to the Pull Request that introduced the chang - The remaining arguments are aligned with the rest of the chart's services, please consult the values file to check whether a parameter exists or was renamed. * [CHANGE] Change default value for `blocks_storage.bucket_store.chunks_cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 * [FEATURE] Add `mimir-continuous-test` in smoke-test mode. Use `helm test` to run a smoke test of the read + write path. +* [FEATURE] Add meta-monitoring via the Grafana Agent Kubernetes operator: scrape metrics and collect logs from Mimir pods and ship them to a remote. #2068 * [ENHANCEMENT] ServiceMonitor object will now have default values based on release namesapce in the `namespace` and `namespaceSelector` fields. #2123 * [ENHANCEMENT] Set the `namespace` metadata field for all kubernetes objects to enable using `--namespace` correctly with Helm even if the specified namespace does not exist. #2123 * [ENHANCEMENT] The new value `serviceMonitor.clusterLabel` controls whether to add a `cluster` label and with what content to ServiceMonitor metrics. #2125 @@ -42,7 +43,7 @@ Entries should include a reference to the Pull Request that introduced the chang * [ENHANCEMENT] Add a simple test for enterprise installation #2027 * [ENHANCEMENT] Check for the containerSecurityContext in values file. #2112 * [ENHANCEMENT] Add `NOTES.txt` to show endpoints URLs for the user at install/upgrade. #2189 - +* [ENHANCEMENT] Add ServiceMonitor for overrides-exporter. 
#2068 ## 2.1.0-beta.7 diff --git a/operations/helm/charts/mimir-distributed/Chart.lock b/operations/helm/charts/mimir-distributed/Chart.lock index 2c58560520..238dcdd630 100644 --- a/operations/helm/charts/mimir-distributed/Chart.lock +++ b/operations/helm/charts/mimir-distributed/Chart.lock @@ -2,5 +2,8 @@ dependencies: - name: minio repository: https://helm.min.io/ version: 8.0.10 -digest: sha256:826b6cc453742c71c2159500596d78666fbdf0ff3ed105caa7ca162ecbd36a45 -generated: "2022-06-09T08:29:05.191797+02:00" +- name: grafana-agent-operator + repository: https://grafana.github.io/helm-charts + version: 0.1.12 +digest: sha256:50d9c12aa13888d04f547a1a17f962765a1866ff7bcd5cb8ecc08ca5eb07223d +generated: "2022-06-23T15:43:38.796114+02:00" diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml index f6722f32c7..e2bf08a961 100644 --- a/operations/helm/charts/mimir-distributed/Chart.yaml +++ b/operations/helm/charts/mimir-distributed/Chart.yaml @@ -2,7 +2,6 @@ apiVersion: v2 version: 2.2.0-weekly.191 appVersion: 2.1.0 description: "Grafana Mimir" -engine: gotpl home: https://grafana.com/docs/mimir/v2.1.x/ icon: https://grafana.com/static/img/logos/logo-mimir.svg kubeVersion: ^1.10.0-0 @@ -13,3 +12,8 @@ dependencies: version: 8.0.10 repository: https://helm.min.io/ condition: minio.enabled + - name: grafana-agent-operator + alias: grafana-agent-operator + version: 0.1.12 + repository: https://grafana.github.io/helm-charts + condition: metaMonitoring.grafanaAgent.installOperator diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index 6599b9fd75..b342f63153 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -14,6 +14,7 @@ Kubernetes: `^1.10.0-0` | Repository | Name | Version | |------------|------|---------| +| https://grafana.github.io/helm-charts | grafana-agent-operator(grafana-agent-operator) | 0.1.12 | | https://helm.min.io/ | minio(minio) | 8.0.10 | ## Dependencies diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl b/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl new file mode 100644 index 0000000000..5bcb62b18b --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/_helpers.tpl @@ -0,0 +1,49 @@ +{{- define "mimir.metaMonitoring.metrics.remoteWrite" -}} +url: {{ .url }} +{{- if .auth }} +basicAuth: +{{- if .auth.username }} + username: + name: {{ include "mimir.resourceName" (dict "ctx" $.ctx "component" "metrics-instance-usernames") }} + key: {{ .usernameKey | quote }} +{{- end }} +{{- with .auth }} +{{- if and .passwordSecretKey .passwordSecretName }} + password: + name: {{ .passwordSecretName | quote }} + key: {{ .passwordSecretKey | quote }} +{{- else if or .passwordSecretKey .passwordSecretName }}{{ required "Set either both passwordSecretKey and passwordSecretName or neither" nil }} +{{- end }} +{{- end }} +{{- end }} +{{- with .headers }} +headers: + {{- toYaml . 
| nindent 2 }} +{{- end }} +{{- end -}} + +{{- define "mimir.metaMonitoring.logs.client" -}} +url: {{ .url }} +{{- if .auth }} +{{- if .auth.tenantId }} +tenantId: {{ .auth.tenantId | quote }} +{{- end }} +basicAuth: +{{- if .auth.username }} + username: + name: {{ include "mimir.resourceName" (dict "ctx" $.ctx "component" "logs-instance-usernames") }} + key: {{ .usernameKey | quote }} +{{- end }} +{{- with .auth }} +{{- if and .passwordSecretKey .passwordSecretName }} + password: + name: {{ .passwordSecretName | quote }} + key: {{ .passwordSecretKey | quote }} +{{- else if or .passwordSecretKey .passwordSecretName }} +{{ required "Set either both passwordSecretKey and passwordSecretName or neither" nil }} +{{- end }} +{{- end }} +{{- end }} +externalLabels: + cluster: {{ include "mimir.clusterName" $.ctx | quote}} +{{- end -}} \ No newline at end of file diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role-binding.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role-binding.yaml new file mode 100644 index 0000000000..0c625c42a5 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role-binding.yaml @@ -0,0 +1,19 @@ +{{- with (.Values.metaMonitoring).grafanaAgent }} +{{- if .enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRoleBinding +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "grafana-agent") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring" ) | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "grafana-agent") }} +subjects: + - kind: ServiceAccount + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "grafana-agent") }} + namespace: {{ .namespace | default $.Release.Namespace }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role.yaml new file mode 100644 index 0000000000..678d9165d2 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-cluster-role.yaml @@ -0,0 +1,39 @@ +{{- with (.Values.metaMonitoring).grafanaAgent }} +{{- if .enabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: ClusterRole +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "grafana-agent") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring" ) | nindent 4 }} +rules: + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - events + verbs: + - get + - list + - watch + - apiGroups: + - networking.k8s.io + resources: + - ingresses + verbs: + - get + - list + - watch + - nonResourceURLs: + - /metrics + - /metrics/cadvisor + verbs: + - get +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-service-account.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-service-account.yaml new file mode 100644 index 0000000000..7c880ed6ac --- /dev/null +++ 
b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent-service-account.yaml @@ -0,0 +1,11 @@ +{{- with (.Values.metaMonitoring).grafanaAgent }} +{{- if .enabled }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "grafana-agent") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring" ) | nindent 4 }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent.yaml new file mode 100644 index 0000000000..fd957a87a2 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/grafana-agent.yaml @@ -0,0 +1,31 @@ +{{- with (.Values.metaMonitoring).grafanaAgent }} +{{- if .enabled }} +apiVersion: monitoring.grafana.com/v1alpha1 +kind: GrafanaAgent +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "meta-monitoring") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring" ) | nindent 4 }} + {{- with .labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceAccountName: {{ include "mimir.resourceName" (dict "ctx" $ "component" "grafana-agent") }} + logs: + instanceSelector: + matchLabels: + {{- include "mimir.selectorLabels" (dict "ctx" $ "component" "meta-monitoring") | nindent 8 }} + # cluster label for logs is added in the LogsInstance + metrics: + instanceSelector: + matchLabels: + {{- include "mimir.selectorLabels" (dict "ctx" $ "component" "meta-monitoring") | nindent 8 }} + externalLabels: + cluster: {{ include "mimir.clusterName" $ }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml new file mode 100644 index 0000000000..145c101fcf --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml @@ -0,0 +1,44 @@ +{{- if and ((.Values.metaMonitoring).grafanaAgent).enabled ((((.Values.metaMonitoring).grafanaAgent).metrics).scrapeK8s).enabled }} +{{- with .Values.serviceMonitor }} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "k8s-ksm") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring") | nindent 4 }} + {{- with .labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: http-metrics + metricRelabelings: + - action: keep + regex: {{ include "mimir.resourceName" (dict "ctx" $) }}.* + sourceLabels: + - deployment + - statefulset + - pod + separator: '' + path: /metrics + honorLabels: true # retain namespace label from kube-state-metrics + {{- with .scrapeTimeout }} + scrapeTimeout: {{ . 
}} + {{- end }} + {{- with ((((($.Values).metaMonitoring).grafanaAgent).metrics).scrapeK8s).kubeStateMetrics }} + namespaceSelector: + matchNames: + - {{ .namespace }} + selector: + matchLabels: + {{- toYaml .labelSelectors | nindent 6 }} + {{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml new file mode 100644 index 0000000000..d6bd60de21 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml @@ -0,0 +1,83 @@ +{{- if and ((.Values.metaMonitoring).grafanaAgent).enabled ((((.Values.metaMonitoring).grafanaAgent).metrics).scrapeK8s).enabled }} +{{- with .Values.serviceMonitor }} +{{- if .enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "k8s-kubelet-cadvisor") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring") | nindent 4 }} + {{- with .labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- with .interval }} + interval: {{ . }} + {{- end }} + {{- with .scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + port: https-metrics + honorLabels: true # retain namespace label from kubelet + relabelings: + - replacement: kubelet # add so that e.g. up{} metric doesn't get clashes with the other endpoint + targetLabel: source + {{- with .relabelings }} + {{- toYaml . | nindent 8 }} + {{- end }} + metricRelabelings: + - action: keep + regex: storage-{{ include "mimir.resourceName" (dict "ctx" $) }}.* + sourceLabels: + - persistentvolumeclaim # present on kubelet_volume_stats* metrics + - targetLabel: instance # replace so that the metrics work with the default metrics mixin + sourceLabels: + - node + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + - bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- with .interval }} + interval: {{ . }} + {{- end }} + {{- with .scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + path: /metrics/cadvisor + port: https-metrics + honorLabels: true # retain namespace label from cadvisor + relabelings: + - replacement: cadvisor # add so that e.g. up{} metric doesn't get clashes with the other endpoint + targetLabel: source + - targetLabel: instance # replace so that the metrics work with the default metrics mixin + sourceLabels: + - node + {{- with .relabelings }} + {{- toYaml . | nindent 8 }} + {{- end }} + metricRelabelings: + - action: keep + regex: {{ include "mimir.resourceName" (dict "ctx" $) }}.* + sourceLabels: + - pod + scheme: https + tlsConfig: + caFile: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + namespaceSelector: + matchNames: + # "default" is the default namespace in which the operator creates the kubelet service. + - default + selector: + matchLabels: + # This is a service added by the agent operator, so this labels is hardcoded to what the operator creates. 
+ app.kubernetes.io/name: kubelet +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance-usernames-secret.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance-usernames-secret.yaml new file mode 100644 index 0000000000..0faefa9b67 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance-usernames-secret.yaml @@ -0,0 +1,17 @@ +{{- if ((.Values.metaMonitoring).grafanaAgent).enabled }} +{{- with ((.Values.metaMonitoring).grafanaAgent).logs }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "logs-instance-usernames") }} + namespace: {{ (($.Values.metaMonitoring).grafanaAgent).namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring") | nindent 4 }} +data: + {{- range $i, $cfg := prepend (.additionalClientConfigs | default list) .remote }} + {{- if (($cfg).auth).username }} + username-{{ $i }}: {{ (($cfg).auth).username | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance.yaml new file mode 100644 index 0000000000..de27e6525f --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/logs-instance.yaml @@ -0,0 +1,38 @@ +{{- with (.Values.metaMonitoring).grafanaAgent }} +{{- if .enabled }} +apiVersion: monitoring.grafana.com/v1alpha1 +kind: LogsInstance +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "meta-monitoring") }} + namespace: {{ (($.Values.metaMonitoring).grafanaAgent).namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring" ) | nindent 4 }} + {{- with .labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clients: + {{- if or (.logs).additionalClientConfigs (.logs).remote }} + {{- range $i, $cfg := prepend ((.logs).additionalClientConfigs | default list) (.logs).remote }} + {{- with $cfg }} + {{- if $cfg.url }} + - {{- include "mimir.metaMonitoring.logs.client" (dict "ctx" $ "url" .url "auth" .auth "usernameKey" (printf "username-%d" $i)) | nindent 6 -}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + # Supply an empty namespace selector to look in all namespaces. 
Remove + # this to only look in the same namespace as the LogsInstance CR + podLogsNamespaceSelector: {} + + podLogsSelector: + matchLabels: + {{- include "mimir.selectorLabels" (dict "ctx" $) | nindent 6 }} + +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance-usernames-secret.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance-usernames-secret.yaml new file mode 100644 index 0000000000..f873d4f6d1 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance-usernames-secret.yaml @@ -0,0 +1,17 @@ +{{- if .Values.metaMonitoring.grafanaAgent.enabled }} +{{- with .Values.metaMonitoring.grafanaAgent.metrics }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "metrics-instance-usernames") }} + namespace: {{ $.Values.metaMonitoring.grafanaAgent.namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring") | nindent 4 }} +data: + {{- range $i, $cfg := prepend (.additionalRemoteWriteConfigs | default list) .remote }} + {{- if (($cfg).auth).username }} + username-{{ $i }}: {{ $cfg.auth.username | b64enc | quote }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance.yaml new file mode 100644 index 0000000000..2b94f61f19 --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/metrics-instance.yaml @@ -0,0 +1,38 @@ +{{- with .Values.metaMonitoring.grafanaAgent }} +{{- if .enabled }} +apiVersion: monitoring.grafana.com/v1alpha1 +kind: MetricsInstance +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "meta-monitoring") }} + namespace: {{ $.Values.metaMonitoring.grafanaAgent.namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring" ) | nindent 4 }} + {{- with .labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + remoteWrite: + {{- if or (.metrics).additionalRemoteWriteConfigs (.metrics).remote }} + {{- range $i, $cfg := prepend ((.metrics).additionalRemoteWriteConfigs | default list) (.metrics).remote }} + {{- with $cfg }} + {{- if $cfg.url }} + - {{- include "mimir.metaMonitoring.metrics.remoteWrite" (dict "ctx" $ "url" .url "auth" .auth "usernameKey" (printf "username-%d" $i) "headers" .headers ) | nindent 6 -}} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + # Supply an empty namespace selector to look in all namespaces. 
Remove + # this to only look in the same namespace as the MetricsInstance CR + serviceMonitorNamespaceSelector: {} + + serviceMonitorSelector: + # Scrape ServiceMonitors from all components + matchLabels: + {{- include "mimir.selectorLabels" (dict "ctx" $) | nindent 6 }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/pod-logs.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/pod-logs.yaml new file mode 100644 index 0000000000..377a807cda --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/pod-logs.yaml @@ -0,0 +1,45 @@ +{{- with (.Values.metaMonitoring).grafanaAgent }} +{{- if .enabled }} +apiVersion: monitoring.grafana.com/v1alpha1 +kind: PodLogs +metadata: + name: {{ include "mimir.resourceName" (dict "ctx" $ "component" "meta-monitoring") }} + namespace: {{ .namespace | default $.Release.Namespace | quote }} + labels: + {{- include "mimir.labels" (dict "ctx" $ "component" "meta-monitoring") | nindent 4 }} + {{- with .labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + pipelineStages: + - cri: { } + relabelings: + - action: replace # For consistency with metrics + replacement: $1 + separator: / + sourceLabels: + - __meta_kubernetes_namespace + - __meta_kubernetes_pod_container_name + targetLabel: job + - action: replace # Necessary for slow queries dashboard + sourceLabels: + - __meta_kubernetes_pod_container_name + targetLabel: name + - targetLabel: cluster + replacement: {{ include "mimir.clusterName" $ }} + + namespaceSelector: + matchNames: + - {{ $.Release.Namespace | quote }} + + selector: + matchLabels: + # Scrape logs from all components + {{- include "mimir.selectorLabels" (dict "ctx" $) | nindent 6 }} + +{{- end -}} +{{- end -}} \ No newline at end of file diff --git a/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-servmon.yaml new file mode 100644 index 0000000000..809d0e75ef --- /dev/null +++ b/operations/helm/charts/mimir-distributed/templates/overrides-exporter/overrides-exporter-servmon.yaml @@ -0,0 +1,3 @@ +{{- if .Values.overrides_exporter.enabled -}} +{{- include "mimir.lib.serviceMonitor" (dict "ctx" $ "component" "overrides-exporter") }} +{{- end -}} diff --git a/operations/helm/charts/mimir-distributed/templates/validate.yaml b/operations/helm/charts/mimir-distributed/templates/validate.yaml index 2cd4b1adb4..abefe109fd 100644 --- a/operations/helm/charts/mimir-distributed/templates/validate.yaml +++ b/operations/helm/charts/mimir-distributed/templates/validate.yaml @@ -23,3 +23,24 @@ {{- if not (has .Values.configStorageType (list "Secret" "ConfigMap")) }} {{- fail "The setting 'configStorageType' should be one of 'Secret' or 'ConfigMap'." 
}} {{- end }} + +{{- with .Values.metaMonitoring.grafanaAgent }} +{{/* + !servmon.Enabled + && agent.Enabled + && (remote.url != "" || (len(additionalConfigs) > 0 && additionalConfigs.url != "")) +*/}} +{{- if and + (not (($.Values).serviceMonitor).enabled) + .enabled + (or + (not (empty ((.metrics).remote).url)) + (and + (not (empty (.metrics).additionalRemoteWriteConfigs)) + (not (empty (first (.metrics).additionalRemoteWriteConfigs).url)) + ) + ) + }} +{{- fail "metaMonitoring.grafanaAgent.remote.url is set, but serviceMonitor is disabled; you will not see any metrics, so enable the serviceMonitor or remove the remote configuration" }} +{{- end }} +{{- end }} diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index c90a853a97..d0d221c657 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -1800,3 +1800,82 @@ smoke_test: extraEnvFrom: [] annotations: {} initContainers: [] + +metaMonitoring: + grafanaAgent: + # -- Controls whether to create PodLogs, MetricsInstance, LogsInstance, and GrafanaAgent CRs to scrape the + # ServiceMonitors of the chart and ship metrics and logs to the remote endpoints below. + # Note that you need to configure serviceMonitor in order to have some metrics available. + enabled: false + + # -- Controls whether to install the Grafana Agent Operator and its CRDs. + # Note that helm will not install CRDs if this flag is enabled during an upgrade. + # In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds + installOperator: false + + logs: + # -- Default destination for logs. The config here is translated to Promtail client + # configuration to write logs to this Loki-compatible remote. Optional. + remote: + # -- Full URL for Loki push endpoint. Usually ends in /loki/api/v1/push + url: '' + + auth: + # -- Used to set X-Scope-OrgID header on requests. Usually not used in combination with username and password. + tenantId: '' + + # -- Basic authentication username. Optional. + username: '' + + # -- The value under key passwordSecretKey in this secret will be used as the basic authentication password. Required only if passwordSecretKey is set. + passwordSecretName: '' + # -- The value under this key in passwordSecretName will be used as the basic authentication password. Required only if passwordSecretName is set. + passwordSecretKey: '' + + # -- Client configurations for the LogsInstance that will scrape Mimir pods. Follows the format of .remote. + additionalClientConfigs: [] + + metrics: + # -- Default destination for metrics. The config here is translated to remote_write + # configuration to push metrics to this Prometheus-compatible remote. Optional. + # Note that you need to configure serviceMonitor in order to have some metrics available. + remote: + # -- Full URL for Prometheus remote-write. Usually ends in /push + url: '' + + # -- Used to add HTTP headers to remote-write requests. + headers: {} + auth: + # -- Basic authentication username. Optional. + username: '' + + # -- The value under key passwordSecretKey in this secret will be used as the basic authentication password. Required only if passwordSecretKey is set. + passwordSecretName: '' + # -- The value under this key in passwordSecretName will be used as the basic authentication password. Required only if passwordSecretName is set.
+ passwordSecretKey: '' + + # -- Additional remote-write for the MetricsInstance that will scrape Mimir pods. Follows the format of .remote. + additionalRemoteWriteConfigs: [] + + scrapeK8s: + # -- When grafanaAgent.enabled and serviceMonitor.enabled, controls whether to create ServiceMonitors CRs + # for cadvisor, kubelet, and kube-state-metrics. The scraped metrics are reduced to those pertaining to + # Mimir pods only. + enabled: true + + # -- Controls service discovery of kube-state-metrics. + kubeStateMetrics: + namespace: kube-system + labelSelectors: + app.kubernetes.io/name: kube-state-metrics + + # -- Sets the namespace of the resources. Leave empty or unset to use the same namespace as the Helm release. + namespace: '' + + # -- Labels to add to all monitoring.grafana.com custom resources. + # Does not affect the ServiceMonitors for kubernetes metrics; use serviceMonitor.labels for that. + labels: {} + + # -- Annotations to add to all monitoring.grafana.com custom resources. + # Does not affect the ServiceMonitors for kubernetes metrics; use serviceMonitor.annotations for that. + annotations: {} diff --git a/operations/helm/ct.yaml b/operations/helm/ct.yaml index a8403230a2..333e86ce8a 100644 --- a/operations/helm/ct.yaml +++ b/operations/helm/ct.yaml @@ -5,5 +5,6 @@ chart-dirs: - operations/helm/charts chart-repos: - minio=https://helm.min.io + - grafana=https://grafana.github.io/helm-charts helm-extra-args: --timeout 600s validate-maintainers: false diff --git a/operations/helm/tests/build.sh b/operations/helm/tests/build.sh index d3f8920b33..755e533122 100755 --- a/operations/helm/tests/build.sh +++ b/operations/helm/tests/build.sh @@ -3,6 +3,9 @@ set -euo pipefail +# use a normal sed on macOS if available +SED=$(which gsed || which sed) + CHART_PATH=operations/helm/charts/mimir-distributed # Start from a clean slate @@ -23,6 +26,6 @@ for FILEPATH in $TESTS; do helm template "${TEST_NAME}" ${CHART_PATH} -f "${FILEPATH}" --output-dir "${OUTPUT_DIR}" --namespace citestns echo "Removing mutable config checksum, helm chart, image tag version for clarity" - find "${OUTPUT_DIR}/$(basename ${CHART_PATH})/templates" -type f -print0 | xargs -0 sed -E -i -- "/^\s+(checksum\/config|(helm.sh\/)?chart|image: \"grafana\/(mimir|enterprise-metrics)):/d" + find "${OUTPUT_DIR}/$(basename ${CHART_PATH})/templates" -type f -print0 | xargs -0 "${SED}" -E -i -- "/^\s+(checksum\/config|(helm.sh\/)?chart|image: \"grafana\/(mimir|enterprise-metrics)):/d" done From 4098f1624c4b952a32880a5cc82a11c4c4104b5e Mon Sep 17 00:00:00 2001 From: zenador Date: Sat, 25 Jun 2022 00:11:11 +0800 Subject: [PATCH 47/63] Change the error message template for per-tenant limits (#2234) --- pkg/util/globalerror/errors.go | 2 +- pkg/util/globalerror/errors_test.go | 6 +++--- pkg/util/validation/errors_test.go | 12 ++++++------ 3 files changed, 10 insertions(+), 10 deletions(-) diff --git a/pkg/util/globalerror/errors.go b/pkg/util/globalerror/errors.go index 966bc9bae1..9226d0fb94 100644 --- a/pkg/util/globalerror/errors.go +++ b/pkg/util/globalerror/errors.go @@ -84,5 +84,5 @@ func (id ID) MessageWithLimitConfig(msg, flag string, addFlags ...string) string sb.WriteString(" and -") sb.WriteString(addFlags[len(addFlags)-1]) } - return fmt.Sprintf("%s (%s%s). You can adjust the related per-tenant limit%s by configuring %s, or by contacting your service administrator.", msg, errPrefix, id, plural, sb.String()) + return fmt.Sprintf("%s (%s%s). 
To adjust the related per-tenant limit%s, configure %s, or contact your service administrator.", msg, errPrefix, id, plural, sb.String()) } diff --git a/pkg/util/globalerror/errors_test.go b/pkg/util/globalerror/errors_test.go index fa99e2000a..338dd485ed 100644 --- a/pkg/util/globalerror/errors_test.go +++ b/pkg/util/globalerror/errors_test.go @@ -21,15 +21,15 @@ func TestID_MessageWithLimitConfig(t *testing.T) { actual string }{ { - expected: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limit by configuring -my-flag1, or by contacting your service administrator.", + expected: "an error (err-mimir-missing-metric-name). To adjust the related per-tenant limit, configure -my-flag1, or contact your service administrator.", actual: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1"), }, { - expected: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limits by configuring -my-flag1 and -my-flag2, or by contacting your service administrator.", + expected: "an error (err-mimir-missing-metric-name). To adjust the related per-tenant limits, configure -my-flag1 and -my-flag2, or contact your service administrator.", actual: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1", "my-flag2"), }, { - expected: "an error (err-mimir-missing-metric-name). You can adjust the related per-tenant limits by configuring -my-flag1, -my-flag2 and -my-flag3, or by contacting your service administrator.", + expected: "an error (err-mimir-missing-metric-name). To adjust the related per-tenant limits, configure -my-flag1, -my-flag2 and -my-flag3, or contact your service administrator.", actual: MissingMetricName.MessageWithLimitConfig("an error", "my-flag1", "my-flag2", "my-flag3"), }, } { diff --git a/pkg/util/validation/errors_test.go b/pkg/util/validation/errors_test.go index b11bceeb48..f13a932f6a 100644 --- a/pkg/util/validation/errors_test.go +++ b/pkg/util/validation/errors_test.go @@ -18,30 +18,30 @@ func TestNewMetadataMetricNameMissingError(t *testing.T) { func TestNewMetadataMetricNameTooLongError(t *testing.T) { err := newMetadataMetricNameTooLongError(&mimirpb.MetricMetadata{MetricFamilyName: "test_metric", Unit: "counter", Help: "This is a test metric."}) - assert.Equal(t, "received a metric metadata whose metric name length exceeds the limit, metric name: 'test_metric' (err-mimir-metric-name-too-long). You can adjust the related per-tenant limit by configuring -validation.max-metadata-length, or by contacting your service administrator.", err.Error()) + assert.Equal(t, "received a metric metadata whose metric name length exceeds the limit, metric name: 'test_metric' (err-mimir-metric-name-too-long). To adjust the related per-tenant limit, configure -validation.max-metadata-length, or contact your service administrator.", err.Error()) } func TestNewMetadataHelpTooLongError(t *testing.T) { err := newMetadataHelpTooLongError(&mimirpb.MetricMetadata{MetricFamilyName: "test_metric", Unit: "counter", Help: "This is a test metric."}) - assert.Equal(t, "received a metric metadata whose help description length exceeds the limit, help: 'This is a test metric.' metric name: 'test_metric' (err-mimir-help-too-long). You can adjust the related per-tenant limit by configuring -validation.max-metadata-length, or by contacting your service administrator.", err.Error()) + assert.Equal(t, "received a metric metadata whose help description length exceeds the limit, help: 'This is a test metric.' 
metric name: 'test_metric' (err-mimir-help-too-long). To adjust the related per-tenant limit, configure -validation.max-metadata-length, or contact your service administrator.", err.Error()) } func TestNewMetadataUnitTooLongError(t *testing.T) { err := newMetadataUnitTooLongError(&mimirpb.MetricMetadata{MetricFamilyName: "test_metric", Unit: "counter", Help: "This is a test metric."}) - assert.Equal(t, "received a metric metadata whose unit name length exceeds the limit, unit: 'counter' metric name: 'test_metric' (err-mimir-unit-too-long). You can adjust the related per-tenant limit by configuring -validation.max-metadata-length, or by contacting your service administrator.", err.Error()) + assert.Equal(t, "received a metric metadata whose unit name length exceeds the limit, unit: 'counter' metric name: 'test_metric' (err-mimir-unit-too-long). To adjust the related per-tenant limit, configure -validation.max-metadata-length, or contact your service administrator.", err.Error()) } func TestNewMaxQueryLengthError(t *testing.T) { err := NewMaxQueryLengthError(time.Hour, time.Minute) - assert.Equal(t, "the query time range exceeds the limit (query length: 1h0m0s, limit: 1m0s) (err-mimir-max-query-length). You can adjust the related per-tenant limit by configuring -store.max-query-length, or by contacting your service administrator.", err.Error()) + assert.Equal(t, "the query time range exceeds the limit (query length: 1h0m0s, limit: 1m0s) (err-mimir-max-query-length). To adjust the related per-tenant limit, configure -store.max-query-length, or contact your service administrator.", err.Error()) } func TestNewRequestRateLimitedError(t *testing.T) { err := NewRequestRateLimitedError(10, 5) - assert.Equal(t, "the request has been rejected because the tenant exceeded the request rate limit, set to 10 requests/s across all distributors with a maximum allowed burst of 5 (err-mimir-tenant-max-request-rate). You can adjust the related per-tenant limits by configuring -distributor.request-rate-limit and -distributor.request-burst-size, or by contacting your service administrator.", err.Error()) + assert.Equal(t, "the request has been rejected because the tenant exceeded the request rate limit, set to 10 requests/s across all distributors with a maximum allowed burst of 5 (err-mimir-tenant-max-request-rate). To adjust the related per-tenant limits, configure -distributor.request-rate-limit and -distributor.request-burst-size, or contact your service administrator.", err.Error()) } func TestNewIngestionRateLimitedError(t *testing.T) { err := NewIngestionRateLimitedError(10, 5) - assert.Equal(t, "the request has been rejected because the tenant exceeded the ingestion rate limit, set to 10 items/s with a maximum allowed burst of 5. This limit is applied on the total number of samples, exemplars and metadata received across all distributors (err-mimir-tenant-max-ingestion-rate). You can adjust the related per-tenant limits by configuring -distributor.ingestion-rate-limit and -distributor.ingestion-burst-size, or by contacting your service administrator.", err.Error()) + assert.Equal(t, "the request has been rejected because the tenant exceeded the ingestion rate limit, set to 10 items/s with a maximum allowed burst of 5. This limit is applied on the total number of samples, exemplars and metadata received across all distributors (err-mimir-tenant-max-ingestion-rate). 
To adjust the related per-tenant limits, configure -distributor.ingestion-rate-limit and -distributor.ingestion-burst-size, or contact your service administrator.", err.Error()) } From 2279f67d538f843b6f92c1d29e53ff6d1722c351 Mon Sep 17 00:00:00 2001 From: Patryk Prus Date: Fri, 24 Jun 2022 12:19:24 -0400 Subject: [PATCH 48/63] Add tests for user metadata in the ingester (#2184) * Add tests for user metadata in the ingester * Assert for no-error case and address flakiness * Verify correct errors are emitted --- pkg/ingester/user_metrics_metadata_test.go | 121 +++++++++++++++++++++ 1 file changed, 121 insertions(+) create mode 100644 pkg/ingester/user_metrics_metadata_test.go diff --git a/pkg/ingester/user_metrics_metadata_test.go b/pkg/ingester/user_metrics_metadata_test.go new file mode 100644 index 0000000000..8eae222dfe --- /dev/null +++ b/pkg/ingester/user_metrics_metadata_test.go @@ -0,0 +1,121 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package ingester + +import ( + "testing" + "time" + + "github.com/prometheus/client_golang/prometheus" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/grafana/mimir/pkg/mimirpb" + "github.com/grafana/mimir/pkg/util/validation" +) + +func TestUserMetricsMetadata(t *testing.T) { + type input struct { + meta mimirpb.MetricMetadata + errContains string + } + + tests := map[string]struct { + maxMetadataPerUser int + maxMetadataPerMetric int + inputMetadata []input + expectedMetadata []*mimirpb.MetricMetadata + }{ + "should succeed for multiple metadata per metric": { + inputMetadata: []input{ + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "foo"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "bar"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "baz"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "qux"}}, + }, + expectedMetadata: []*mimirpb.MetricMetadata{ + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "foo"}, + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "bar"}, + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "baz"}, + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "qux"}, + }, + }, + "should fail when metadata per user limit reached": { + maxMetadataPerUser: 1, + inputMetadata: []input{ + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "foo"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "bar"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "baz"}, errContains: "err-mimir-max-metadata-per-user"}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "qux"}, errContains: "err-mimir-max-metadata-per-user"}, + }, + expectedMetadata: []*mimirpb.MetricMetadata{ + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "foo"}, + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "bar"}, + }, + }, + "should fail when metadata per metric limit reached": { + maxMetadataPerMetric: 1, + inputMetadata: []input{ + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "foo"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "bar"}, errContains: 
"err-mimir-max-metadata-per-metric"}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "baz"}}, + {meta: mimirpb.MetricMetadata{Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "qux"}, errContains: "err-mimir-max-metadata-per-metric"}, + }, + expectedMetadata: []*mimirpb.MetricMetadata{ + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_1", Help: "foo"}, + {Type: mimirpb.COUNTER, MetricFamilyName: "test_metric_2", Help: "baz"}, + }, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + // Mock the ring + ring := &ringCountMock{} + ring.On("HealthyInstancesCount").Return(1) + ring.On("ZonesCount").Return(1) + + // Mock limiter + limits, err := validation.NewOverrides(validation.Limits{ + MaxGlobalMetricsWithMetadataPerUser: testData.maxMetadataPerUser, + MaxGlobalMetadataPerMetric: testData.maxMetadataPerMetric, + }, nil) + require.NoError(t, err) + limiter := NewLimiter(limits, ring, 1, false) + + // Mock metrics + metrics := newIngesterMetrics( + prometheus.NewPedanticRegistry(), + true, + func() *InstanceLimits { return defaultInstanceLimits }, + nil, + nil, + ) + + mm := newMetadataMap(limiter, metrics, "test") + + // Attempt to add all metadata + for _, i := range testData.inputMetadata { + err := mm.add(i.meta.MetricFamilyName, &i.meta) + + if i.errContains != "" { + require.Error(t, err) + require.Contains(t, err.Error(), i.errContains) + } else { + require.NoError(t, err) + } + } + + // Verify expected elements are stored + clientMeta := mm.toClientMetadata() + assert.ElementsMatch(t, testData.expectedMetadata, clientMeta) + + // Purge all metadata + mm.purge(time.Time{}) + + // Verify all metadata purged + clientMeta = mm.toClientMetadata() + assert.Empty(t, clientMeta) + }) + } +} From 4679c7549ef2e80c4d6a79a1c93175a0259ca939 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar <15064823+codesome@users.noreply.github.com> Date: Fri, 24 Jun 2022 21:52:34 +0530 Subject: [PATCH 49/63] Fix post merge reviews on 2187 (#2230) * Fix post merge reviews on 2187 Signed-off-by: Ganesh Vernekar * Update docs/sources/operators-guide/mimir-runbooks/_index.md Co-authored-by: Marco Pracucci --- cmd/mimir/config-descriptor.json | 10 +++++----- cmd/mimir/help-all.txt.tmpl | 6 +++--- .../reference-configuration-parameters/index.md | 13 ++++++------- .../operators-guide/mimir-runbooks/_index.md | 7 ++----- pkg/ingester/ingester.go | 16 +++++++++------- pkg/ingester/ingester_test.go | 4 ++++ pkg/ingester/metrics_test.go | 6 ------ pkg/storage/tsdb/config.go | 8 ++++---- pkg/util/globalerror/errors.go | 1 - pkg/util/validation/limits.go | 2 +- 10 files changed, 34 insertions(+), 39 deletions(-) diff --git a/cmd/mimir/config-descriptor.json b/cmd/mimir/config-descriptor.json index e9edf6f8d5..655d157240 100644 --- a/cmd/mimir/config-descriptor.json +++ b/cmd/mimir/config-descriptor.json @@ -2652,7 +2652,7 @@ "kind": "field", "name": "out_of_order_time_window", "required": false, - "desc": "Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). 
The ingester will need more memory as a factor of _rate of out-of-order samples being ingested_ and _the number of series that are getting out-of-order samples_. You can configure it per tenant.", + "desc": "Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of rate of out-of-order samples being ingested and the number of series that are getting out-of-order samples.", "fieldValue": null, "fieldDefaultValue": 0, "fieldFlag": "ingester.out-of-order-time-window", @@ -5415,23 +5415,23 @@ }, { "kind": "field", - "name": "out_of_order_cap_min", + "name": "out_of_order_capacity_min", "required": false, "desc": "Minimum capacity for out-of-order chunks, in samples between 0 and 255.", "fieldValue": null, "fieldDefaultValue": 4, - "fieldFlag": "blocks-storage.tsdb.out-of-order-cap-min", + "fieldFlag": "blocks-storage.tsdb.out-of-order-capacity-min", "fieldType": "int", "fieldCategory": "experimental" }, { "kind": "field", - "name": "out_of_order_cap_max", + "name": "out_of_order_capacity_max", "required": false, "desc": "Maximum capacity for out of order chunks, in samples between 1 and 255.", "fieldValue": null, "fieldDefaultValue": 32, - "fieldFlag": "blocks-storage.tsdb.out-of-order-cap-max", + "fieldFlag": "blocks-storage.tsdb.out-of-order-capacity-max", "fieldType": "int", "fieldCategory": "experimental" } diff --git a/cmd/mimir/help-all.txt.tmpl b/cmd/mimir/help-all.txt.tmpl index 0d46d8d8be..373f4e5000 100644 --- a/cmd/mimir/help-all.txt.tmpl +++ b/cmd/mimir/help-all.txt.tmpl @@ -505,9 +505,9 @@ Usage of ./cmd/mimir/mimir: [experimental] True to enable snapshotting of in-memory TSDB data on disk when shutting down. -blocks-storage.tsdb.new-chunk-disk-mapper [experimental] Temporary flag to select whether to use the new (used in upstream Prometheus) or the old (legacy) chunk disk mapper. - -blocks-storage.tsdb.out-of-order-cap-max int + -blocks-storage.tsdb.out-of-order-capacity-max int [experimental] Maximum capacity for out of order chunks, in samples between 1 and 255. (default 32) - -blocks-storage.tsdb.out-of-order-cap-min int + -blocks-storage.tsdb.out-of-order-capacity-min int [experimental] Minimum capacity for out-of-order chunks, in samples between 0 and 255. (default 4) -blocks-storage.tsdb.retention-period duration TSDB blocks retention in the ingester before a block is removed, relative to the newest block written for the tenant. This should be larger than the -blocks-storage.tsdb.block-ranges-period, -querier.query-store-after and large enough to give store-gateways and queriers enough time to discover newly uploaded blocks. (default 24h0m0s) @@ -866,7 +866,7 @@ Usage of ./cmd/mimir/mimir: -ingester.metadata-retain-period duration Period at which metadata we have not seen will remain in memory before being deleted. (default 10m0s) -ingester.out-of-order-time-window value - [experimental] Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). 
(2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of _rate of out-of-order samples being ingested_ and _the number of series that are getting out-of-order samples_. You can configure it per tenant. + [experimental] Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of rate of out-of-order samples being ingested and the number of series that are getting out-of-order samples. -ingester.rate-update-period duration Period with which to update the per-tenant ingestion rates. (default 15s) -ingester.ring.consul.acl-token string diff --git a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md index 43fe972a73..1c6a82d9da 100644 --- a/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md +++ b/docs/sources/operators-guide/configuring/reference-configuration-parameters/index.md @@ -2723,9 +2723,8 @@ The `limits` block configures default and per-tenant limits imposed by component # example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's # maximum time, if the series does not exist. For example, within # [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a -# factor of _rate of out-of-order samples being ingested_ and _the number of -# series that are getting out-of-order samples_. You can configure it per -# tenant. +# factor of rate of out-of-order samples being ingested and the number of series +# that are getting out-of-order samples. # CLI flag: -ingester.out-of-order-time-window [out_of_order_time_window: | default = 0s] @@ -3539,13 +3538,13 @@ tsdb: # (experimental) Minimum capacity for out-of-order chunks, in samples between # 0 and 255. - # CLI flag: -blocks-storage.tsdb.out-of-order-cap-min - [out_of_order_cap_min: | default = 4] + # CLI flag: -blocks-storage.tsdb.out-of-order-capacity-min + [out_of_order_capacity_min: | default = 4] # (experimental) Maximum capacity for out of order chunks, in samples between # 1 and 255. - # CLI flag: -blocks-storage.tsdb.out-of-order-cap-max - [out_of_order_cap_max: | default = 32] + # CLI flag: -blocks-storage.tsdb.out-of-order-capacity-max + [out_of_order_capacity_max: | default = 32] ``` ### compactor diff --git a/docs/sources/operators-guide/mimir-runbooks/_index.md b/docs/sources/operators-guide/mimir-runbooks/_index.md index 179b7f96ec..e4c976d0d9 100644 --- a/docs/sources/operators-guide/mimir-runbooks/_index.md +++ b/docs/sources/operators-guide/mimir-runbooks/_index.md @@ -1401,6 +1401,8 @@ How it **works**: - If the incoming timestamp is more than 1 hour older than the most recent timestamp ingested for the tenant, the sample will be rejected. +> **Note**: If the out-of-order sample ingestion is enabled, then this error is similar to `err-mimir-sample-out-of-order` below with a difference that the sample is older than the out-of-order time window as it relates to the latest sample for that particular time series or the TSDB. 
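For illustration, the out-of-order window referred to in the note above corresponds to the `out_of_order_time_window` per-tenant limit (the `-ingester.out-of-order-time-window` flag); a minimal sketch, assuming the standard `limits` block of the Mimir YAML configuration, could look roughly like this:

```yaml
# Illustration only: accept samples up to 1h older than the newest sample of the
# series (or the TSDB's maximum time). The default of 0s leaves out-of-order
# ingestion disabled.
limits:
  out_of_order_time_window: 1h
```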
+ ### err-mimir-sample-out-of-order This error occurs when the ingester rejects a sample because another sample with a more recent timestamp has already been ingested. @@ -1419,11 +1421,6 @@ Common **causes**: > **Note**: You can learn more about out of order samples in Prometheus, in the blog post [Debugging out of order samples](https://www.robustperception.io/debugging-out-of-order-samples/). -### err-mimir-sample-too-old - -This error is similar to `err-mimir-sample-out-of-order`. The main difference is that the out-of-order support is enabled, but the sample is -older than the out-of-order time window as it relates to the latest sample for that particular time series or the TSDB. - ### err-mimir-sample-duplicate-timestamp This error occurs when the ingester rejects a sample because it is a duplicate of a previously received sample with the same timestamp but different value in the same time series. diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go index 08b6395b3e..9b2d4e8743 100644 --- a/pkg/ingester/ingester.go +++ b/pkg/ingester/ingester.go @@ -697,7 +697,9 @@ func (i *Ingester) PushWithCleanup(ctx context.Context, req *mimirpb.WriteReques case storage.ErrTooOldSample: sampleTooOldCount++ - updateFirstPartial(func() error { return newIngestErrSampleTooOld(model.Time(s.TimestampMs), ts.Labels) }) + updateFirstPartial(func() error { + return newIngestErrSampleTimestampTooOldOOOEnabled(model.Time(s.TimestampMs), ts.Labels, oooTW) + }) continue case storage.ErrDuplicateSampleForTimestamp: @@ -1502,8 +1504,8 @@ func (i *Ingester) createTSDB(userID string) (*userTSDB, error) { AllowOverlappingQueries: true, // We can have overlapping blocks from past or out-of-order enabled during runtime. AllowOverlappingCompaction: false, // always false since Mimir only uploads lvl 1 compacted blocks OutOfOrderTimeWindow: oooTW.Milliseconds(), // The unit must be same as our timestamps. 
- OutOfOrderCapMin: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMin), - OutOfOrderCapMax: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapMax), + OutOfOrderCapMin: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapacityMin), + OutOfOrderCapMax: int64(i.cfg.BlocksStorageConfig.TSDB.OutOfOrderCapacityMax), }, nil) if err != nil { return nil, errors.Wrapf(err, "failed to open TSDB: %s", udir) @@ -2109,12 +2111,12 @@ func newIngestErrSampleTimestampTooOld(timestamp model.Time, labels []mimirpb.La return newIngestErr(globalerror.SampleTimestampTooOld, "the sample has been rejected because its timestamp is too old", timestamp, labels) } -func newIngestErrSampleOutOfOrder(timestamp model.Time, labels []mimirpb.LabelAdapter) error { - return newIngestErr(globalerror.SampleOutOfOrder, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed", timestamp, labels) +func newIngestErrSampleTimestampTooOldOOOEnabled(timestamp model.Time, labels []mimirpb.LabelAdapter, oooTimeWindow model.Duration) error { + return newIngestErr(globalerror.SampleTimestampTooOld, fmt.Sprintf("the sample has been rejected because another sample with a more recent timestamp has already been ingested and this sample is beyond the out-of-order time window of %s", oooTimeWindow.String()), timestamp, labels) } -func newIngestErrSampleTooOld(timestamp model.Time, labels []mimirpb.LabelAdapter) error { - return newIngestErr(globalerror.SampleTooOld, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and this sample is beyond the out-of-order time window", timestamp, labels) +func newIngestErrSampleOutOfOrder(timestamp model.Time, labels []mimirpb.LabelAdapter) error { + return newIngestErr(globalerror.SampleOutOfOrder, "the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed", timestamp, labels) } func newIngestErrSampleDuplicateTimestamp(timestamp model.Time, labels []mimirpb.LabelAdapter) error { diff --git a/pkg/ingester/ingester_test.go b/pkg/ingester/ingester_test.go index a112bc7a71..60a8e12287 100644 --- a/pkg/ingester/ingester_test.go +++ b/pkg/ingester/ingester_test.go @@ -5895,6 +5895,10 @@ func TestNewIngestErrMsgs(t *testing.T) { err: newIngestErrSampleTimestampTooOld(timestamp, metricLabelAdapters), msg: `the sample has been rejected because its timestamp is too old (err-mimir-sample-timestamp-too-old). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, }, + "newIngestErrSampleTimestampTooOld_out_of_order_enabled": { + err: newIngestErrSampleTimestampTooOldOOOEnabled(timestamp, metricLabelAdapters, model.Duration(2*time.Hour)), + msg: `the sample has been rejected because another sample with a more recent timestamp has already been ingested and this sample is beyond the out-of-order time window of 2h (err-mimir-sample-timestamp-too-old). The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, + }, "newIngestErrSampleOutOfOrder": { err: newIngestErrSampleOutOfOrder(timestamp, metricLabelAdapters), msg: `the sample has been rejected because another sample with a more recent timestamp has already been ingested and out-of-order samples are not allowed (err-mimir-sample-out-of-order). 
The affected sample has timestamp 1970-01-19T05:30:43.969Z and is from series {__name__="test"}`, diff --git a/pkg/ingester/metrics_test.go b/pkg/ingester/metrics_test.go index 0942dafb9d..e870e07337 100644 --- a/pkg/ingester/metrics_test.go +++ b/pkg/ingester/metrics_test.go @@ -749,11 +749,5 @@ func populateTSDBMetrics(base float64) *prometheus.Registry { }) outOfOrderSamplesAppendedTotal.Add(3) - tooOldSamplesTotal := promauto.With(r).NewCounter(prometheus.CounterOpts{ - Name: "prometheus_tsdb_too_old_samples_total", - Help: "Total number of out-of-order samples ingestion failed attempts.", - }) - tooOldSamplesTotal.Add(3) - return r } diff --git a/pkg/storage/tsdb/config.go b/pkg/storage/tsdb/config.go index c35a5723c3..9d530e3921 100644 --- a/pkg/storage/tsdb/config.go +++ b/pkg/storage/tsdb/config.go @@ -179,8 +179,8 @@ type TSDBConfig struct { CloseIdleTSDBInterval time.Duration `yaml:"-"` // For experimental out of order metrics support. - OutOfOrderCapMin int `yaml:"out_of_order_cap_min" category:"experimental"` - OutOfOrderCapMax int `yaml:"out_of_order_cap_max" category:"experimental"` + OutOfOrderCapacityMin int `yaml:"out_of_order_capacity_min" category:"experimental"` + OutOfOrderCapacityMax int `yaml:"out_of_order_capacity_max" category:"experimental"` } // RegisterFlags registers the TSDBConfig flags. @@ -210,8 +210,8 @@ func (cfg *TSDBConfig) RegisterFlags(f *flag.FlagSet) { f.IntVar(&cfg.HeadChunksWriteQueueSize, "blocks-storage.tsdb.head-chunks-write-queue-size", 0, "The size of the write queue used by the head chunks mapper. Lower values reduce memory utilisation at the cost of potentially higher ingest latency. Value of 0 switches chunks mapper to implementation without a queue. This flag is only used if the new chunk disk mapper is enabled with -blocks-storage.tsdb.new-chunk-disk-mapper.") f.BoolVar(&cfg.NewChunkDiskMapper, "blocks-storage.tsdb.new-chunk-disk-mapper", false, "Temporary flag to select whether to use the new (used in upstream Prometheus) or the old (legacy) chunk disk mapper.") f.BoolVar(&cfg.IsolationEnabled, "blocks-storage.tsdb.isolation-enabled", false, "[Deprecated] Enables TSDB isolation feature. Disabling may improve performance.") - f.IntVar(&cfg.OutOfOrderCapMin, "blocks-storage.tsdb.out-of-order-cap-min", 4, "Minimum capacity for out-of-order chunks, in samples between 0 and 255.") - f.IntVar(&cfg.OutOfOrderCapMax, "blocks-storage.tsdb.out-of-order-cap-max", 32, "Maximum capacity for out of order chunks, in samples between 1 and 255.") + f.IntVar(&cfg.OutOfOrderCapacityMin, "blocks-storage.tsdb.out-of-order-capacity-min", 4, "Minimum capacity for out-of-order chunks, in samples between 0 and 255.") + f.IntVar(&cfg.OutOfOrderCapacityMax, "blocks-storage.tsdb.out-of-order-capacity-max", 32, "Maximum capacity for out of order chunks, in samples between 1 and 255.") } // Validate the config. 
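For reference, a minimal sketch of the renamed capacity settings as they would appear in a YAML configuration file, assuming the `blocks_storage.tsdb` layout shown in the reference documentation above (values are the documented defaults):

```yaml
blocks_storage:
  tsdb:
    # Formerly out_of_order_cap_min (-blocks-storage.tsdb.out-of-order-cap-min)
    out_of_order_capacity_min: 4
    # Formerly out_of_order_cap_max (-blocks-storage.tsdb.out-of-order-cap-max)
    out_of_order_capacity_max: 32
```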
diff --git a/pkg/util/globalerror/errors.go b/pkg/util/globalerror/errors.go index 9226d0fb94..012dd6186d 100644 --- a/pkg/util/globalerror/errors.go +++ b/pkg/util/globalerror/errors.go @@ -55,7 +55,6 @@ const ( SampleTimestampTooOld ID = "sample-timestamp-too-old" SampleOutOfOrder ID = "sample-out-of-order" - SampleTooOld ID = "sample-too-old" SampleDuplicateTimestamp ID = "sample-duplicate-timestamp" ExemplarSeriesMissing ID = "exemplar-series-missing" diff --git a/pkg/util/validation/limits.go b/pkg/util/validation/limits.go index f53ddf30bd..379fcc9207 100644 --- a/pkg/util/validation/limits.go +++ b/pkg/util/validation/limits.go @@ -181,7 +181,7 @@ func (l *Limits) RegisterFlags(f *flag.FlagSet) { f.IntVar(&l.MaxGlobalMetadataPerMetric, MaxMetadataPerMetricFlag, 0, "The maximum number of metadata per metric, across the cluster. 0 to disable.") f.IntVar(&l.MaxGlobalExemplarsPerUser, "ingester.max-global-exemplars-per-user", 0, "The maximum number of exemplars in memory, across the cluster. 0 to disable exemplars ingestion.") f.Var(&l.ActiveSeriesCustomTrackersConfig, "ingester.active-series-custom-trackers", "Additional active series metrics, matching the provided matchers. Matchers should be in form :, like 'foobar:{foo=\"bar\"}'. Multiple matchers can be provided either providing the flag multiple times or providing multiple semicolon-separated values to a single flag.") - f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of _rate of out-of-order samples being ingested_ and _the number of series that are getting out-of-order samples_. You can configure it per tenant.") + f.Var(&l.OutOfOrderTimeWindow, "ingester.out-of-order-time-window", "Non-zero value enables out-of-order support for most recent samples that are within the time window in relation to the following two conditions: (1) The newest sample for that time series, if it exists. For example, within [series.maxTime-timeWindow, series.maxTime]). (2) The TSDB's maximum time, if the series does not exist. For example, within [db.maxTime-timeWindow, db.maxTime]). The ingester will need more memory as a factor of rate of out-of-order samples being ingested and the number of series that are getting out-of-order samples.") f.IntVar(&l.MaxChunksPerQuery, MaxChunksPerQueryFlag, 2e6, "Maximum number of chunks that can be fetched in a single query from ingesters and long-term storage. This limit is enforced in the querier, ruler and store-gateway. 0 to disable.") f.IntVar(&l.MaxFetchedSeriesPerQuery, MaxSeriesPerQueryFlag, 0, "The maximum number of unique series for which a query can fetch samples from each ingesters and storage. This limit is enforced in the querier and ruler. 0 to disable") From b85d722d2f10d217d76cc25da2bff48a7e8f8eb1 Mon Sep 17 00:00:00 2001 From: Steve Simpson Date: Fri, 24 Jun 2022 18:31:20 +0200 Subject: [PATCH 50/63] Docs: Basic documentation for deploying the ruler using jsonnet. (#2127) * Docs: Basic documentation for deploying the ruler using jsonnet. * Fix doc links. * Move to new file, fix indents, minor rewording. 
* Make checks on first migration step more explicit. * Review comments: Remove redundant text. Add link to configuration parameters. * Apply suggestions from code review Co-authored-by: Ursula Kallio * Review comments. * Fix doc link filename * Review comments Co-authored-by: Ursula Kallio --- .../jsonnet/configuring-ruler.md | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) create mode 100644 docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-ruler.md diff --git a/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-ruler.md b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-ruler.md new file mode 100644 index 0000000000..7ec76e0741 --- /dev/null +++ b/docs/sources/operators-guide/deploying-grafana-mimir/jsonnet/configuring-ruler.md @@ -0,0 +1,98 @@ +--- +title: "Configuring the Grafana Mimir ruler with Jsonnet" +menuTitle: "Configuring ruler" +description: "Learn how to configure the Grafana Mimir ruler when using Jsonnet." +weight: 20 +--- + +# Configuring the Grafana Mimir ruler with Jsonnet + +The ruler is an optional component and is therefore not deployed by default when using Jsonnet. +For more information about the ruler, see [Grafana Mimir ruler]({{< relref "../../architecture/components/ruler/index.md" >}}). + +To enable it, add the following Jsonnet code to the `_config` section: + +```jsonnet +_config+:: { + ruler_enabled: true + ruler_client_type: '', +} +``` + +The `ruler_client_type` option must be one of either `local`, `azure`, `aws`, or `s3`. +For more information about the options available for storing ruler state, see [Grafana Mimir ruler: State]({{< relref "../../architecture/components/ruler/index.md#state" >}}). + +To get started, use the `local` client type for initial testing: + +```jsonnet +_config+:: { + ruler_enabled: true + ruler_client_type: 'local', + ruler_local_directory: '/path/to/local/directory', +} +``` + +If you are using object storage, additional configuration options are required: + +- Amazon S3 (`s3`) + + - `ruler_storage_bucket_name` + - `aws_region` + +- Google Cloud Storage (`gcs`) + + - `ruler_storage_bucket_name` + +- Azure (`azure`) + - `ruler_storage_bucket_name` + - `ruler_storage_azure_account_name` + - `ruler_storage_azure_account_key` + +> **Note:** You need to manually provide the storage credentials for `s3` and `gcs` by using additional command line arguments as necessary. For more information, see [Grafana Mimir configuration parameters: ruler_storage]({{< relref "../../configuring/reference-configuration-parameters/index.md#ruler_storage" >}}). + +## Operational modes + +The ruler has two operational modes: _internal_ and _remote_. By default, the Jsonnet deploys the ruler by using the internal operational mode. +For more information about these modes, see [Operational modes]({{< relref "../../architecture/components/ruler/index.md#operational-modes" >}}). + +To enable the remote operational mode, add the following code to the Jsonnet: + +```jsonnet +_config+:: { + ruler_remote_evaluation_enabled: true +} +``` + +> **Note:** To support the _remote_ operational mode, a separate query path is deployed to evaluate rules that consist of three additional Kubernetes deployments: +> +> - `ruler-query-frontend` +> - `ruler-query-scheduler` +> - `ruler-querier` + +### Migrate to remote evaluation + +To perform a zero downtime migration from internal to remote rule evaluation, follow these steps: + +1. 
Deploy the following changes to enable remote evaluation in migration mode. + Doing so causes the three new and previously listed Kubernetes deployments to start. However, they will not reconfigure the ruler to use them just yet. + + ```jsonnet + _config+:: { + ruler_remote_evaluation_enabled: true + ruler_remote_evaluation_migration_enabled: true + } + ``` + +1. Check that all of pods for the following deployments have successfully started before moving to the next step: + + - `ruler-query-frontend` + - `ruler-query-scheduler` + - `ruler-querier` + +1. Reconfigure the ruler pods to perform remote evaluation, by deploying the following changes: + + ```jsonnet + _config+:: { + ruler_remote_evaluation_enabled: true + } + ``` From 7e4accdb8208dc3f5714cb8f6b7686d16f441773 Mon Sep 17 00:00:00 2001 From: Nick Pillitteri <56quarters@users.noreply.github.com> Date: Fri, 24 Jun 2022 13:08:48 -0400 Subject: [PATCH 51/63] Use BasicLifecycler for distributors and auto-forget (#2154) Use the BasicLifecycler in distributors for managing their lifecycle so that we can take advantage of the "auto-forget" delegates feature. This prevents the ring from filling up with "unhealthy" distributors that are never removed. This wasn't a bug but it was confusing for users and operators. Fixes #2138 Signed-off-by: Nick Pillitteri --- CHANGELOG.md | 1 + .../config/mimir.yaml | 5 + .../tsdb-blocks-storage-s3/compose-up.sh | 2 +- .../config/grafana-agent.yaml | 4 +- .../tsdb-blocks-storage-s3/config/mimir.yaml | 1 - .../config/prometheus.yaml | 4 +- .../docker-compose.jsonnet | 7 +- .../tsdb-blocks-storage-s3/docker-compose.yml | 25 +++- .../config/mimir.yaml | 5 + pkg/distributor/distributor.go | 141 ++++++++++++------ pkg/distributor/distributor_ring.go | 58 +++---- pkg/distributor/distributor_ring_test.go | 73 --------- pkg/distributor/distributor_test.go | 2 +- pkg/distributor/instance_count.go | 53 +++++++ pkg/distributor/instance_count_test.go | 101 +++++++++++++ 15 files changed, 323 insertions(+), 159 deletions(-) delete mode 100644 pkg/distributor/distributor_ring_test.go create mode 100644 pkg/distributor/instance_count.go create mode 100644 pkg/distributor/instance_count_test.go diff --git a/CHANGELOG.md b/CHANGELOG.md index 6eb02aeeec..3a8a3af19a 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -40,6 +40,7 @@ * [ENHANCEMENT] Compactor: Run sanity check on blocks storage configuration at startup. #2143 * [ENHANCEMENT] Compactor: Add HTTP API for uploading TSDB blocks. Enabled with `-compactor.block-upload-enabled`. #1694 #2126 * [ENHANCEMENT] Ingester: Enable querying overlapping blocks by default. #2187 +* [ENHANCEMENT] Distributor: Auto-forget unhealthy distributors after ten failed ring heartbeats. #2154 * [BUGFIX] Fix regexp parsing panic for regexp label matchers with start/end quantifiers. #1883 * [BUGFIX] Ingester: fixed deceiving error log "failed to update cached shipped blocks after shipper initialisation", occurring for each new tenant in the ingester. #1893 * [BUGFIX] Ring: fix bug where instances may appear unhealthy in the hash ring web UI even though they are not. 
#1933 diff --git a/development/tsdb-blocks-storage-s3-single-binary/config/mimir.yaml b/development/tsdb-blocks-storage-s3-single-binary/config/mimir.yaml index 34211b4dde..50987754c6 100644 --- a/development/tsdb-blocks-storage-s3-single-binary/config/mimir.yaml +++ b/development/tsdb-blocks-storage-s3-single-binary/config/mimir.yaml @@ -3,6 +3,11 @@ multitenancy_enabled: false distributor: pool: health_check_ingesters: true + ring: + kvstore: + store: consul + consul: + host: consul:8500 ingester_client: grpc_client_config: diff --git a/development/tsdb-blocks-storage-s3/compose-up.sh b/development/tsdb-blocks-storage-s3/compose-up.sh index 5f6368af41..f55f3b83c0 100755 --- a/development/tsdb-blocks-storage-s3/compose-up.sh +++ b/development/tsdb-blocks-storage-s3/compose-up.sh @@ -14,5 +14,5 @@ cd $SCRIPT_DIR && make # -gcflags "all=-N -l" disables optimizations that allow for better run with combination with Delve debugger. # GOARCH is not changed. CGO_ENABLED=0 GOOS=linux go build -mod=vendor -gcflags "all=-N -l" -o ${SCRIPT_DIR}/mimir ${SCRIPT_DIR}/../../cmd/mimir -docker-compose -f ${SCRIPT_DIR}/docker-compose.yml build distributor +docker-compose -f ${SCRIPT_DIR}/docker-compose.yml build distributor-1 docker-compose -f ${SCRIPT_DIR}/docker-compose.yml up $@ diff --git a/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml b/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml index 649b2ef5ff..e33510dc11 100644 --- a/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml +++ b/development/tsdb-blocks-storage-s3/config/grafana-agent.yaml @@ -13,7 +13,7 @@ prometheus: scrape_configs: - job_name: tsdb-blocks-storage-s3/distributor static_configs: - - targets: ['distributor:8001'] + - targets: ['distributor-1:8000', 'distributor-2:8001'] labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' @@ -61,4 +61,4 @@ prometheus: namespace: 'tsdb-blocks-storage-s3' remote_write: - - url: http://distributor:8001/api/v1/push + - url: http://distributor-1:8000/api/v1/push diff --git a/development/tsdb-blocks-storage-s3/config/mimir.yaml b/development/tsdb-blocks-storage-s3/config/mimir.yaml index 79af7dccfe..ae51b55bd3 100644 --- a/development/tsdb-blocks-storage-s3/config/mimir.yaml +++ b/development/tsdb-blocks-storage-s3/config/mimir.yaml @@ -4,7 +4,6 @@ distributor: pool: health_check_ingesters: true ring: - instance_addr: 127.0.0.1 kvstore: store: consul consul: diff --git a/development/tsdb-blocks-storage-s3/config/prometheus.yaml b/development/tsdb-blocks-storage-s3/config/prometheus.yaml index d2a432e306..a2550b5eb2 100644 --- a/development/tsdb-blocks-storage-s3/config/prometheus.yaml +++ b/development/tsdb-blocks-storage-s3/config/prometheus.yaml @@ -6,7 +6,7 @@ global: scrape_configs: - job_name: tsdb-blocks-storage-s3/distributor static_configs: - - targets: ['distributor:8001'] + - targets: ['distributor-1:8000', 'distributor-2:8001'] labels: cluster: 'docker-compose' namespace: 'tsdb-blocks-storage-s3' @@ -54,5 +54,5 @@ scrape_configs: namespace: 'tsdb-blocks-storage-s3' remote_write: - - url: http://distributor:8001/api/v1/push + - url: http://distributor-1:8000/api/v1/push send_exemplars: true diff --git a/development/tsdb-blocks-storage-s3/docker-compose.jsonnet b/development/tsdb-blocks-storage-s3/docker-compose.jsonnet index 4662ad179f..786fb73a3a 100644 --- a/development/tsdb-blocks-storage-s3/docker-compose.jsonnet +++ b/development/tsdb-blocks-storage-s3/docker-compose.jsonnet @@ -36,7 +36,12 @@ std.manifestYamlDoc({ {}, distributor:: { - 
distributor: mimirService({ + 'distributor-1': mimirService({ + target: 'distributor', + httpPort: 8000, + }), + + 'distributor-2': mimirService({ target: 'distributor', httpPort: 8001, }), diff --git a/development/tsdb-blocks-storage-s3/docker-compose.yml b/development/tsdb-blocks-storage-s3/docker-compose.yml index 4e0bbf65f7..2ba1b3892b 100644 --- a/development/tsdb-blocks-storage-s3/docker-compose.yml +++ b/development/tsdb-blocks-storage-s3/docker-compose.yml @@ -100,7 +100,30 @@ "image": "consul" "ports": - "8500:8500" - "distributor": + "distributor-1": + "build": + "context": "." + "dockerfile": "dev.dockerfile" + "command": + - "sh" + - "-c" + - "sleep 3 && exec ./mimir -config.file=./config/mimir.yaml -target=distributor -server.http-listen-port=8000 -server.grpc-listen-port=9000 -activity-tracker.filepath=/activity/distributor-8000 " + "depends_on": + - "minio" + - "consul" + "environment": + - "JAEGER_AGENT_HOST=jaeger" + - "JAEGER_AGENT_PORT=6831" + - "JAEGER_SAMPLER_PARAM=1" + - "JAEGER_SAMPLER_TYPE=const" + - "JAEGER_TAGS=app=distributor" + "image": "mimir" + "ports": + - "8000:8000" + "volumes": + - "./config:/mimir/config" + - "./activity:/activity" + "distributor-2": "build": "context": "." "dockerfile": "dev.dockerfile" diff --git a/development/tsdb-blocks-storage-swift-single-binary/config/mimir.yaml b/development/tsdb-blocks-storage-swift-single-binary/config/mimir.yaml index 3dbffde831..bd827e0cb9 100644 --- a/development/tsdb-blocks-storage-swift-single-binary/config/mimir.yaml +++ b/development/tsdb-blocks-storage-swift-single-binary/config/mimir.yaml @@ -3,6 +3,11 @@ multitenancy_enabled: false distributor: pool: health_check_ingesters: true + ring: + kvstore: + store: consul + consul: + host: consul:8500 ingester_client: grpc_client_config: diff --git a/pkg/distributor/distributor.go b/pkg/distributor/distributor.go index 5e60645c17..23fefd6e60 100644 --- a/pkg/distributor/distributor.go +++ b/pkg/distributor/distributor.go @@ -19,6 +19,7 @@ import ( "github.com/go-kit/log" "github.com/go-kit/log/level" "github.com/grafana/dskit/flagext" + "github.com/grafana/dskit/kv" "github.com/grafana/dskit/limiter" "github.com/grafana/dskit/ring" ring_client "github.com/grafana/dskit/ring/client" @@ -65,8 +66,12 @@ var ( ) const ( - // DistributorRingKey is the key under which we store the distributors ring in the KVStore. - DistributorRingKey = "distributor" + // distributorRingKey is the key under which we store the distributors ring in the KVStore. + distributorRingKey = "distributor" + + // ringAutoForgetUnhealthyPeriods is how many consecutive timeout periods an unhealthy instance + // in the ring will be automatically removed after. + ringAutoForgetUnhealthyPeriods = 10 ) const ( @@ -87,8 +92,9 @@ type Distributor struct { // The global rate limiter requires a distributors ring to count // the number of healthy instances - distributorsLifeCycler *ring.Lifecycler + distributorsLifecycler *ring.BasicLifecycler distributorsRing *ring.Ring + healthyInstancesCount *atomic.Uint32 // For handling HA replicas. HATracker *haTracker @@ -206,44 +212,16 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove subservices := []services.Service(nil) subservices = append(subservices, haTracker) - // Create the configured ingestion rate limit strategy (local or global). In case - // it's an internal dependency and can't join the distributors ring, we skip rate - // limiting. 
- var ingestionRateStrategy, requestRateStrategy limiter.RateLimiterStrategy - var distributorsLifeCycler *ring.Lifecycler - var distributorsRing *ring.Ring - - if !canJoinDistributorsRing { - requestRateStrategy = newInfiniteRateStrategy() - ingestionRateStrategy = newInfiniteRateStrategy() - } else { - distributorsLifeCycler, err = ring.NewLifecycler(cfg.DistributorRing.ToLifecyclerConfig(), nil, "distributor", DistributorRingKey, true, log, prometheus.WrapRegistererWithPrefix("cortex_", reg)) - if err != nil { - return nil, err - } - - distributorsRing, err = ring.New(cfg.DistributorRing.ToRingConfig(), "distributor", DistributorRingKey, log, prometheus.WrapRegistererWithPrefix("cortex_", reg)) - if err != nil { - return nil, errors.Wrap(err, "failed to initialize distributors' ring client") - } - subservices = append(subservices, distributorsLifeCycler, distributorsRing) - - requestRateStrategy = newGlobalRateStrategy(newRequestRateStrategy(limits), distributorsLifeCycler) - ingestionRateStrategy = newGlobalRateStrategy(newIngestionRateStrategy(limits), distributorsLifeCycler) - } - d := &Distributor{ - cfg: cfg, - log: log, - ingestersRing: ingestersRing, - ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, log), - distributorsLifeCycler: distributorsLifeCycler, - distributorsRing: distributorsRing, - limits: limits, - requestRateLimiter: limiter.NewRateLimiter(requestRateStrategy, 10*time.Second), - ingestionRateLimiter: limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second), - HATracker: haTracker, - ingestionRate: util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval), + cfg: cfg, + log: log, + ingestersRing: ingestersRing, + ingesterPool: NewPool(cfg.PoolConfig, ingestersRing, cfg.IngesterClientFactory, log), + healthyInstancesCount: atomic.NewUint32(0), + limits: limits, + forwarder: forwarding.NewForwarder(reg, cfg.Forwarding), + HATracker: haTracker, + ingestionRate: util_math.NewEWMARate(0.2, instanceIngestionRateTickInterval), queryDuration: instrument.NewHistogramCollector(promauto.With(reg).NewHistogramVec(prometheus.HistogramOpts{ Namespace: "cortex", @@ -351,7 +329,31 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove return d.ingestionRate.Rate() }) - d.forwarder = forwarding.NewForwarder(reg, d.cfg.Forwarding) + // Create the configured ingestion rate limit strategy (local or global). In case + // it's an internal dependency and we can't join the distributors ring, we skip rate + // limiting. 
+ var ingestionRateStrategy, requestRateStrategy limiter.RateLimiterStrategy + var distributorsLifecycler *ring.BasicLifecycler + var distributorsRing *ring.Ring + + if !canJoinDistributorsRing { + requestRateStrategy = newInfiniteRateStrategy() + ingestionRateStrategy = newInfiniteRateStrategy() + } else { + distributorsRing, distributorsLifecycler, err = newRingAndLifecycler(cfg.DistributorRing, d.healthyInstancesCount, log, reg) + if err != nil { + return nil, err + } + + subservices = append(subservices, distributorsLifecycler, distributorsRing) + requestRateStrategy = newGlobalRateStrategy(newRequestRateStrategy(limits), d) + ingestionRateStrategy = newGlobalRateStrategy(newIngestionRateStrategy(limits), d) + } + + d.requestRateLimiter = limiter.NewRateLimiter(requestRateStrategy, 10*time.Second) + d.ingestionRateLimiter = limiter.NewRateLimiter(ingestionRateStrategy, 10*time.Second) + d.distributorsLifecycler = distributorsLifecycler + d.distributorsRing = distributorsRing d.replicationFactor.Set(float64(ingestersRing.ReplicationFactor())) d.activeUsers = util.NewActiveUsersCleanupWithDefaultValues(d.cleanupInactiveUser) @@ -361,6 +363,7 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove if err != nil { return nil, err } + d.subservicesWatcher = services.NewFailureWatcher() d.subservicesWatcher.WatchManager(d.subservices) @@ -368,9 +371,52 @@ func New(cfg Config, clientConfig ingester_client.Config, limits *validation.Ove return d, nil } +// newRingAndLifecycler creates a new distributor ring and lifecycler with all required lifecycler delegates +func newRingAndLifecycler(cfg RingConfig, instanceCount *atomic.Uint32, logger log.Logger, reg prometheus.Registerer) (*ring.Ring, *ring.BasicLifecycler, error) { + kvStore, err := kv.NewClient(cfg.KVStore, ring.GetCodec(), kv.RegistererWithKVName(reg, "distributor-lifecycler"), logger) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to initialize distributors' KV store") + } + + lifecyclerCfg, err := cfg.ToBasicLifecyclerConfig(logger) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to build distributors' lifecycler config") + } + + var delegate ring.BasicLifecyclerDelegate + delegate = ring.NewInstanceRegisterDelegate(ring.ACTIVE, ringNumTokens) + delegate = newHealthyInstanceDelegate(instanceCount, cfg.HeartbeatTimeout, delegate) + delegate = ring.NewLeaveOnStoppingDelegate(delegate, logger) + delegate = ring.NewAutoForgetDelegate(ringAutoForgetUnhealthyPeriods*cfg.HeartbeatTimeout, delegate, logger) + + distributorsLifecycler, err := ring.NewBasicLifecycler(lifecyclerCfg, "distributor", distributorRingKey, kvStore, delegate, logger, prometheus.WrapRegistererWithPrefix("cortex_", reg)) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to initialize distributors' lifecycler") + } + + distributorsRing, err := ring.New(cfg.ToRingConfig(), "distributor", distributorRingKey, logger, prometheus.WrapRegistererWithPrefix("cortex_", reg)) + if err != nil { + return nil, nil, errors.Wrap(err, "failed to initialize distributors' ring client") + } + + return distributorsRing, distributorsLifecycler, nil +} + func (d *Distributor) starting(ctx context.Context) error { - // Only report success if all sub-services start properly - return services.StartManagerAndAwaitHealthy(ctx, d.subservices) + if err := services.StartManagerAndAwaitHealthy(ctx, d.subservices); err != nil { + return errors.Wrap(err, "unable to start distributor subservices") + } + + // Distributors get embedded in 
rulers and queriers to talk to ingesters on the query path. In that + // case they won't have a distributor lifecycler or ring so don't try to join the distributor ring. + if d.distributorsLifecycler != nil && d.distributorsRing != nil { + level.Info(d.log).Log("msg", "waiting until distributor is ACTIVE in the ring") + if err := ring.WaitInstanceState(ctx, d.distributorsRing, d.distributorsLifecycler.GetInstanceID(), ring.ACTIVE); err != nil { + return err + } + } + + return nil } func (d *Distributor) running(ctx context.Context) error { @@ -1420,3 +1466,12 @@ func (d *Distributor) ServeHTTP(w http.ResponseWriter, req *http.Request) { util.WriteHTMLResponse(w, ringNotEnabledPage) } } + +// HealthyInstancesCount implements the ReadLifecycler interface +// +// We use a ring lifecycler delegate to count the number of members of the +// ring. The count is then used to enforce rate limiting correctly for each +// distributor. $EFFECTIVE_RATE_LIMIT = $GLOBAL_RATE_LIMIT / $NUM_INSTANCES +func (d *Distributor) HealthyInstancesCount() int { + return int(d.healthyInstancesCount.Load()) +} diff --git a/pkg/distributor/distributor_ring.go b/pkg/distributor/distributor_ring.go index e19ae140c7..2c6583dbba 100644 --- a/pkg/distributor/distributor_ring.go +++ b/pkg/distributor/distributor_ring.go @@ -7,6 +7,7 @@ package distributor import ( "flag" + "fmt" "os" "time" @@ -16,8 +17,13 @@ import ( "github.com/grafana/dskit/kv" "github.com/grafana/dskit/netutil" "github.com/grafana/dskit/ring" +) - util_log "github.com/grafana/mimir/pkg/util/log" +const ( + // ringNumTokens is how many tokens each distributor should have in the ring. + // Distributors use a ring because they need to know how many distributors there + // are in total for rate limiting. + ringNumTokens = 1 ) // RingConfig masks the ring lifecycler config which contains @@ -43,7 +49,7 @@ type RingConfig struct { func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet, logger log.Logger) { hostname, err := os.Hostname() if err != nil { - level.Error(util_log.Logger).Log("msg", "failed to get hostname", "err", err) + level.Error(logger).Log("msg", "failed to get hostname", "err", err) os.Exit(1) } @@ -61,39 +67,23 @@ func (cfg *RingConfig) RegisterFlags(f *flag.FlagSet, logger log.Logger) { f.StringVar(&cfg.InstanceID, "distributor.ring.instance-id", hostname, "Instance ID to register in the ring.") } -// ToLifecyclerConfig returns a LifecyclerConfig based on the distributor -// ring config. 
-func (cfg *RingConfig) ToLifecyclerConfig() ring.LifecyclerConfig { - // We have to make sure that the ring.LifecyclerConfig and ring.Config - // defaults are preserved - lc := ring.LifecyclerConfig{} - rc := ring.Config{} - - flagext.DefaultValues(&lc) - flagext.DefaultValues(&rc) - - // Configure ring - rc.KVStore = cfg.KVStore - rc.HeartbeatTimeout = cfg.HeartbeatTimeout - rc.ReplicationFactor = 1 +func (cfg *RingConfig) ToBasicLifecyclerConfig(logger log.Logger) (ring.BasicLifecyclerConfig, error) { + instanceAddr, err := ring.GetInstanceAddr(cfg.InstanceAddr, cfg.InstanceInterfaceNames, logger) + if err != nil { + return ring.BasicLifecyclerConfig{}, err + } - // Configure lifecycler - lc.RingConfig = rc - lc.ListenPort = cfg.ListenPort - lc.Addr = cfg.InstanceAddr - lc.Port = cfg.InstancePort - lc.ID = cfg.InstanceID - lc.InfNames = cfg.InstanceInterfaceNames - lc.UnregisterOnShutdown = true - lc.HeartbeatPeriod = cfg.HeartbeatPeriod - lc.HeartbeatTimeout = cfg.HeartbeatTimeout - lc.ObservePeriod = 0 - lc.NumTokens = 1 - lc.JoinAfter = 0 - lc.MinReadyDuration = 0 - lc.FinalSleep = 0 - - return lc + instancePort := ring.GetInstancePort(cfg.InstancePort, cfg.ListenPort) + + return ring.BasicLifecyclerConfig{ + ID: cfg.InstanceID, + Addr: fmt.Sprintf("%s:%d", instanceAddr, instancePort), + HeartbeatPeriod: cfg.HeartbeatPeriod, + HeartbeatTimeout: cfg.HeartbeatTimeout, + TokensObservePeriod: 0, + NumTokens: ringNumTokens, + KeepInstanceInTheRingOnShutdown: false, + }, nil } func (cfg *RingConfig) ToRingConfig() ring.Config { diff --git a/pkg/distributor/distributor_ring_test.go b/pkg/distributor/distributor_ring_test.go deleted file mode 100644 index 735879179c..0000000000 --- a/pkg/distributor/distributor_ring_test.go +++ /dev/null @@ -1,73 +0,0 @@ -// SPDX-License-Identifier: AGPL-3.0-only -// Provenance-includes-location: https://github.com/cortexproject/cortex/blob/master/pkg/distributor/distributor_ring_test.go -// Provenance-includes-license: Apache-2.0 -// Provenance-includes-copyright: The Cortex Authors. 
- -package distributor - -import ( - "testing" - "time" - - "github.com/grafana/dskit/flagext" - "github.com/grafana/dskit/ring" - "github.com/stretchr/testify/assert" -) - -func TestRingConfig_DefaultConfigToLifecyclerConfig(t *testing.T) { - cfg := RingConfig{} - expected := ring.LifecyclerConfig{} - - flagext.DefaultValues(&cfg) - flagext.DefaultValues(&expected) - - // The default config of the distributor ring must be the exact same - // of the default lifecycler config, except few options which are - // intentionally overridden - expected.ListenPort = cfg.ListenPort - expected.RingConfig.ReplicationFactor = 1 - expected.RingConfig.KVStore.Store = "memberlist" - expected.NumTokens = 1 - expected.MinReadyDuration = 0 - expected.FinalSleep = 0 - expected.InfNames = cfg.InstanceInterfaceNames - - assert.Equal(t, expected, cfg.ToLifecyclerConfig()) -} - -func TestRingConfig_CustomConfigToLifecyclerConfig(t *testing.T) { - cfg := RingConfig{} - expected := ring.LifecyclerConfig{} - - flagext.DefaultValues(&cfg) - flagext.DefaultValues(&expected) - - // Customize the distributor ring config - cfg.HeartbeatPeriod = 1 * time.Second - cfg.HeartbeatTimeout = 10 * time.Second - cfg.InstanceID = "test" - cfg.InstanceInterfaceNames = []string{"abc1"} - cfg.InstancePort = 10 - cfg.InstanceAddr = "1.2.3.4" - cfg.ListenPort = 10 - - // The lifecycler config should be generated based upon the distributor - // ring config - expected.HeartbeatPeriod = cfg.HeartbeatPeriod - expected.HeartbeatTimeout = cfg.HeartbeatTimeout - expected.RingConfig.HeartbeatTimeout = cfg.HeartbeatTimeout - expected.RingConfig.KVStore.Store = "memberlist" - expected.ID = cfg.InstanceID - expected.InfNames = cfg.InstanceInterfaceNames - expected.Port = cfg.InstancePort - expected.Addr = cfg.InstanceAddr - expected.ListenPort = cfg.ListenPort - - // Hardcoded config - expected.RingConfig.ReplicationFactor = 1 - expected.NumTokens = 1 - expected.MinReadyDuration = 0 - expected.FinalSleep = 0 - - assert.Equal(t, expected, cfg.ToLifecyclerConfig()) -} diff --git a/pkg/distributor/distributor_test.go b/pkg/distributor/distributor_test.go index 6c6691a5de..8fcdbf6248 100644 --- a/pkg/distributor/distributor_test.go +++ b/pkg/distributor/distributor_test.go @@ -2866,7 +2866,7 @@ func prepare(t *testing.T, cfg prepConfig) ([]*Distributor, []mockIngester, []*p // updates to the expected size if distributors[0].distributorsRing != nil { test.Poll(t, time.Second, cfg.numDistributors, func() interface{} { - return distributors[0].distributorsLifeCycler.HealthyInstancesCount() + return distributors[0].HealthyInstancesCount() }) } diff --git a/pkg/distributor/instance_count.go b/pkg/distributor/instance_count.go new file mode 100644 index 0000000000..78b4ffa3e6 --- /dev/null +++ b/pkg/distributor/instance_count.go @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package distributor + +import ( + "time" + + "github.com/grafana/dskit/ring" + "go.uber.org/atomic" +) + +// healthyInstanceDelegate counts the number of healthy instances that are part of the ring +// and stores the count to the provided atomic integer. Used here to count the number of +// distributors in the ring to determine how to enforce rate limiting. 
+type healthyInstanceDelegate struct { + count *atomic.Uint32 + heartbeatTimeout time.Duration + next ring.BasicLifecyclerDelegate +} + +func newHealthyInstanceDelegate(count *atomic.Uint32, heartbeatTimeout time.Duration, next ring.BasicLifecyclerDelegate) *healthyInstanceDelegate { + return &healthyInstanceDelegate{count: count, heartbeatTimeout: heartbeatTimeout, next: next} +} + +// OnRingInstanceRegister implements the ring.BasicLifecyclerDelegate interface +func (d *healthyInstanceDelegate) OnRingInstanceRegister(lifecycler *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { + return d.next.OnRingInstanceRegister(lifecycler, ringDesc, instanceExists, instanceID, instanceDesc) +} + +// OnRingInstanceTokens implements the ring.BasicLifecyclerDelegate interface +func (d *healthyInstanceDelegate) OnRingInstanceTokens(lifecycler *ring.BasicLifecycler, tokens ring.Tokens) { + d.next.OnRingInstanceTokens(lifecycler, tokens) +} + +// OnRingInstanceStopping implements the ring.BasicLifecyclerDelegate interface +func (d *healthyInstanceDelegate) OnRingInstanceStopping(lifecycler *ring.BasicLifecycler) { + d.next.OnRingInstanceStopping(lifecycler) +} + +// OnRingInstanceHeartbeat implements the ring.BasicLifecyclerDelegate interface +func (d *healthyInstanceDelegate) OnRingInstanceHeartbeat(lifecycler *ring.BasicLifecycler, ringDesc *ring.Desc, instanceDesc *ring.InstanceDesc) { + activeMembers := uint32(0) + now := time.Now() + + for _, instance := range ringDesc.Ingesters { + if ring.ACTIVE == instance.State && instance.IsHeartbeatHealthy(d.heartbeatTimeout, now) { + activeMembers++ + } + } + + d.count.Store(activeMembers) + d.next.OnRingInstanceHeartbeat(lifecycler, ringDesc, instanceDesc) +} diff --git a/pkg/distributor/instance_count_test.go b/pkg/distributor/instance_count_test.go new file mode 100644 index 0000000000..7cb2673337 --- /dev/null +++ b/pkg/distributor/instance_count_test.go @@ -0,0 +1,101 @@ +// SPDX-License-Identifier: AGPL-3.0-only + +package distributor + +import ( + "testing" + "time" + + "github.com/grafana/dskit/ring" + "github.com/stretchr/testify/assert" + "go.uber.org/atomic" +) + +type nopDelegate struct{} + +func (n nopDelegate) OnRingInstanceRegister(lifecycler *ring.BasicLifecycler, ringDesc ring.Desc, instanceExists bool, instanceID string, instanceDesc ring.InstanceDesc) (ring.InstanceState, ring.Tokens) { + return instanceDesc.State, instanceDesc.GetTokens() +} + +func (n nopDelegate) OnRingInstanceTokens(lifecycler *ring.BasicLifecycler, tokens ring.Tokens) { +} + +func (n nopDelegate) OnRingInstanceStopping(lifecycler *ring.BasicLifecycler) { +} + +func (n nopDelegate) OnRingInstanceHeartbeat(lifecycler *ring.BasicLifecycler, ringDesc *ring.Desc, instanceDesc *ring.InstanceDesc) { +} + +func TestHealthyInstanceDelegate_OnRingInstanceHeartbeat(t *testing.T) { + // addInstance registers a new instance with the given ring and sets its last heartbeat timestamp + addInstance := func(desc *ring.Desc, id string, state ring.InstanceState, timestamp int64) { + instance := desc.AddIngester(id, "127.0.0.1", "", []uint32{1}, state, time.Now()) + instance.Timestamp = timestamp + desc.Ingesters[id] = instance + } + + tests := map[string]struct { + ringSetup func(desc *ring.Desc) + heartbeatTimeout time.Duration + expectedCount uint32 + }{ + "all instances healthy and active": { + ringSetup: func(desc *ring.Desc) { + now := time.Now() + addInstance(desc, "distributor-1", 
ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-2", ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-3", ring.ACTIVE, now.Unix()) + }, + heartbeatTimeout: time.Minute, + expectedCount: 3, + }, + + "all instances healthy not all instances active": { + ringSetup: func(desc *ring.Desc) { + now := time.Now() + addInstance(desc, "distributor-1", ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-2", ring.LEAVING, now.Unix()) + addInstance(desc, "distributor-3", ring.ACTIVE, now.Unix()) + }, + heartbeatTimeout: time.Minute, + expectedCount: 2, + }, + + "some instances healthy all instances active": { + ringSetup: func(desc *ring.Desc) { + now := time.Now() + addInstance(desc, "distributor-1", ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-2", ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-3", ring.ACTIVE, now.Add(-5*time.Minute).Unix()) + }, + heartbeatTimeout: time.Minute, + expectedCount: 2, + }, + + "some instances healthy but timeout disabled all instances active": { + ringSetup: func(desc *ring.Desc) { + now := time.Now() + addInstance(desc, "distributor-1", ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-2", ring.ACTIVE, now.Unix()) + addInstance(desc, "distributor-3", ring.ACTIVE, now.Add(-5*time.Minute).Unix()) + }, + heartbeatTimeout: 0, + expectedCount: 3, + }, + } + + for testName, testData := range tests { + t.Run(testName, func(t *testing.T) { + count := atomic.NewUint32(0) + ringDesc := ring.NewDesc() + + testData.ringSetup(ringDesc) + instance := ringDesc.Ingesters["distributor-1"] + + delegate := newHealthyInstanceDelegate(count, testData.heartbeatTimeout, &nopDelegate{}) + delegate.OnRingInstanceHeartbeat(&ring.BasicLifecycler{}, ringDesc, &instance) + + assert.Equal(t, testData.expectedCount, count.Load()) + }) + } +} From 84481b7f511dbc7d4138ba3f7f8ba74c7217ab9d Mon Sep 17 00:00:00 2001 From: Jack Baldry Date: Mon, 27 Jun 2022 15:53:56 +0100 Subject: [PATCH 52/63] Set CODEOWNERS to primary technical writer (#2242) --- CODEOWNERS | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CODEOWNERS b/CODEOWNERS index 0613d51fc4..6507b90876 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1 +1 @@ -/docs/ @grafana/docs-squad @jdbaldry \ No newline at end of file +/docs/ @osg-grafana From 1355190bbb60dd20e67f01c6ec0e347c68b69656 Mon Sep 17 00:00:00 2001 From: Ganesh Vernekar <15064823+codesome@users.noreply.github.com> Date: Mon, 27 Jun 2022 23:44:10 +0530 Subject: [PATCH 53/63] Vendor latest mimir-prometheus/main (#2243) * Vendor latest mimir-prometheus/main Signed-off-by: Ganesh Vernekar * Update the bytes length of the meta files With the new vendored version of mimir-prometheus the meta files now have a new property called out_of_order to state if the block was an out-of-order block or not. This new property gets added to every meta file so we need to update the expected lengths of this tests. 
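For readers who don't have a block directory handy: every TSDB block ships a meta.json describing its ULID, time range, stats, and compaction history, and the vendored change simply adds one more key to that file. The sketch below uses made-up values and omits any extra sections a real Mimir block may carry; only the `out_of_order` key, quoted again right below, is the new part.

```json
{
  "ulid": "01G6N4Y1MS0000000000000000",
  "minTime": 1656316800000,
  "maxTime": 1656324000000,
  "stats": {
    "numSamples": 120,
    "numSeries": 1,
    "numChunks": 1
  },
  "compaction": {
    "level": 1,
    "sources": ["01G6N4Y1MS0000000000000000"]
  },
  "version": 1,
  "out_of_order": false
}
```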
This is how the new property looks in the metas: - "out_of_order": false, Co-authored-by: Jesus Vazquez --- go.mod | 2 +- go.sum | 4 +- pkg/storage/tsdb/upload_test.go | 6 +-- .../prometheus/prometheus/tsdb/block.go | 3 ++ .../prometheus/prometheus/tsdb/compact.go | 7 ++-- .../prometheus/prometheus/tsdb/db.go | 40 ++++++++++++++++--- .../prometheus/prometheus/tsdb/head_wal.go | 14 ++++--- vendor/modules.txt | 4 +- 8 files changed, 58 insertions(+), 22 deletions(-) diff --git a/go.mod b/go.mod index 5c683f5670..73a5beb3a0 100644 --- a/go.mod +++ b/go.mod @@ -227,7 +227,7 @@ replace git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110 replace github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab // Using a fork of Prometheus while we work on querysharding to avoid a dependency on the upstream. -replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c +replace github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220627145625-5e8406a1d4a5 // Out of order Support forces us to fork thanos because we've changed the ChunkReader interface. // Once the out of order support is upstreamed and Thanos has vendored it, we can remove this override. diff --git a/go.sum b/go.sum index e66b9e4d05..6fb1b14ca4 100644 --- a/go.sum +++ b/go.sum @@ -744,8 +744,8 @@ github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe h1:mxrRWDjKtob43xF9n github.com/grafana/e2e v0.1.1-0.20220519104354-1db01e4751fe/go.mod h1:+26VJWpczg2OU3D0537acnHSHzhJORpxOs6F+M27tZo= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167 h1:PgEQkGHR4YimSCEGT5IoswN9gJKZDVskf+he6UClCLw= github.com/grafana/memberlist v0.3.1-0.20220425183535-6b97a09b7167/go.mod h1:MS2lj3INKhZjWNqd3N0m3J+Jxf3DAOnAH9VT3Sh9MUE= -github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c h1:oJdK/F/mW2j/dy2nKOtmcMBVnHx70mAf2tE1T2oqLPE= -github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= +github.com/grafana/mimir-prometheus v0.0.0-20220627145625-5e8406a1d4a5 h1:xlK4WGUnyG7odN0XeMGHzGB7s1/uuEKacl2X9fXcUbA= +github.com/grafana/mimir-prometheus v0.0.0-20220627145625-5e8406a1d4a5/go.mod h1:evpqrqffGRI38M1zH3IHpmXTeho8IfX5Qpx6Ixpqhyk= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2 h1:uirlL/j72L93RhV4+mkWhjv0cov2I0MIgPOG9rMDr1k= github.com/grafana/regexp v0.0.0-20220304095617-2e8d9baf4ac2/go.mod h1:M5qHK+eWfAv8VR/265dIuEpL3fNfeC21tXXp9itM24A= github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 h1:DG++oZD7E6YUm8YNZOu7RwZ8J/Slhcx3iOlKQBY6Oh0= diff --git a/pkg/storage/tsdb/upload_test.go b/pkg/storage/tsdb/upload_test.go index 480b85f3e0..111bb3dc09 100644 --- a/pkg/storage/tsdb/upload_test.go +++ b/pkg/storage/tsdb/upload_test.go @@ -105,7 +105,7 @@ func TestUploadBlock(t *testing.T) { require.Equal(t, 3, len(bkt.Objects())) require.Equal(t, 3751, len(bkt.Objects()[path.Join(b1.String(), block.ChunksDirname, "000001")])) require.Equal(t, 401, len(bkt.Objects()[path.Join(b1.String(), block.IndexFilename)])) - require.Equal(t, 546, len(bkt.Objects()[path.Join(b1.String(), block.MetaFilename)])) + require.Equal(t, 570, len(bkt.Objects()[path.Join(b1.String(), block.MetaFilename)])) origMeta, err := metadata.ReadFromDir(path.Join(tmpDir, "test", b1.String())) require.NoError(t, err) @@ -131,7 +131,7 @@ func TestUploadBlock(t *testing.T) { require.Equal(t, 3, len(bkt.Objects())) require.Equal(t, 3751, 
len(bkt.Objects()[path.Join(b1.String(), block.ChunksDirname, "000001")])) require.Equal(t, 401, len(bkt.Objects()[path.Join(b1.String(), block.IndexFilename)])) - require.Equal(t, 546, len(bkt.Objects()[path.Join(b1.String(), block.MetaFilename)])) + require.Equal(t, 570, len(bkt.Objects()[path.Join(b1.String(), block.MetaFilename)])) }) t.Run("upload with no external labels works just fine", func(t *testing.T) { @@ -151,7 +151,7 @@ func TestUploadBlock(t *testing.T) { require.Equal(t, 6, len(bkt.Objects())) // 3 from b1, 3 from b2 require.Equal(t, 3736, len(bkt.Objects()[path.Join(b2.String(), block.ChunksDirname, "000001")])) require.Equal(t, 401, len(bkt.Objects()[path.Join(b2.String(), block.IndexFilename)])) - require.Equal(t, 525, len(bkt.Objects()[path.Join(b2.String(), block.MetaFilename)])) + require.Equal(t, 549, len(bkt.Objects()[path.Join(b2.String(), block.MetaFilename)])) origMeta, err := metadata.ReadFromDir(path.Join(tmpDir, b2.String())) require.NoError(t, err) diff --git a/vendor/github.com/prometheus/prometheus/tsdb/block.go b/vendor/github.com/prometheus/prometheus/tsdb/block.go index 399a2eed1f..3c820fc626 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/block.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/block.go @@ -169,6 +169,9 @@ type BlockMeta struct { // Version of the index format. Version int `json:"version"` + + // OutOfOrder is true if the block was directly created from out-of-order samples. + OutOfOrder bool `json:"out_of_order"` } // BlockStats contains stats about contents of a block. diff --git a/vendor/github.com/prometheus/prometheus/tsdb/compact.go b/vendor/github.com/prometheus/prometheus/tsdb/compact.go index 80d3f62536..2db02c340e 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/compact.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/compact.go @@ -608,9 +608,10 @@ func (c *LeveledCompactor) compactOOO(dest string, oooHead *OOOCompactionHead, s for jx := range outBlocks[ix] { uid := ulid.MustNew(outBlocksTime, rand.Reader) meta := &BlockMeta{ - ULID: uid, - MinTime: mint, - MaxTime: maxt, + ULID: uid, + MinTime: mint, + MaxTime: maxt, + OutOfOrder: true, } meta.Compaction.Level = 1 meta.Compaction.Sources = []ulid.ULID{uid} diff --git a/vendor/github.com/prometheus/prometheus/tsdb/db.go b/vendor/github.com/prometheus/prometheus/tsdb/db.go index 305559e222..2670118bea 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/db.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/db.go @@ -838,10 +838,13 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs } // Set the min valid time for the ingested samples // to be no lower than the maxt of the last block. - blocks := db.Blocks() minValidTime := int64(math.MinInt64) - if len(blocks) > 0 { - minValidTime = blocks[len(blocks)-1].Meta().MaxTime + // We do not consider blocks created from out-of-order samples for Head's minValidTime + // since minValidTime is only for the in-order data and we do not want to discard unnecessary + // samples from the Head. 
+ inOrderMaxTime, ok := db.inOrderBlocksMaxTime() + if ok { + minValidTime = inOrderMaxTime } if initErr := db.head.Init(minValidTime); initErr != nil { @@ -858,7 +861,6 @@ func open(dir string, l log.Logger, r prometheus.Registerer, opts *Options, rngs return nil, errors.Wrap(err, "repair corrupted WAL") } } - } go db.run() @@ -991,6 +993,7 @@ func (db *DB) ApplyConfig(conf *config.Config) error { } } + db.opts.OutOfOrderTimeWindow = oooTimeWindow db.head.ApplyConfig(conf, wblog) if !db.oooWasEnabled.Load() { @@ -1237,10 +1240,11 @@ func (db *DB) reload() error { if err := db.reloadBlocks(); err != nil { return errors.Wrap(err, "reloadBlocks") } - if len(db.blocks) == 0 { + maxt, ok := db.inOrderBlocksMaxTime() + if !ok { return nil } - if err := db.head.Truncate(db.blocks[len(db.blocks)-1].MaxTime()); err != nil { + if err := db.head.Truncate(maxt); err != nil { return errors.Wrap(err, "head truncate") } return nil @@ -1636,6 +1640,30 @@ func (db *DB) Blocks() []*Block { return db.blocks } +// inOrderBlocksMaxTime returns the max time among the blocks that were not totally created +// out of out-of-order data. If the returned boolean is true, it means there is at least +// one such block. +func (db *DB) inOrderBlocksMaxTime() (maxt int64, ok bool) { + maxt, ok, hasOOO := int64(math.MinInt64), false, false + // If blocks are overlapping, last block might not have the max time. So check all blocks. + for _, b := range db.Blocks() { + hasOOO = hasOOO || b.meta.OutOfOrder + if !b.meta.OutOfOrder && b.meta.MaxTime > maxt { + ok = true + maxt = b.meta.MaxTime + } + } + if !hasOOO && ok && db.opts.OutOfOrderTimeWindow > 0 { + // Temporary patch. To be removed by mid July 2022. + // Before this patch, blocks did not have "out_of_order" in their meta, so we cannot + // say which block has the out_of_order data. In that case the out-of-order block can be + // up to 2 block ranges ahead of the latest in-order block. + // Note: if hasOOO was true, it means the latest block has the new meta and is taken care in inOrderBlocksMaxTime(). + maxt -= 2 * db.opts.MinBlockDuration + } + return maxt, ok +} + // Head returns the databases's head. func (db *DB) Head() *Head { return db.head diff --git a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go index e92bcc8b88..6b6e835769 100644 --- a/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go +++ b/vendor/github.com/prometheus/prometheus/tsdb/head_wal.go @@ -464,9 +464,9 @@ func (wp *walSubsetProcessor) waitUntilIdle() { } func (h *Head) loadWbl(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.HeadSeriesRef, lastMmapRef chunks.ChunkDiskMapperRef) (err error) { - // Track number of samples that referenced a series we don't know about + // Track number of samples, m-map markers, that referenced a series we don't know about // for error reporting. - var unknownRefs atomic.Uint64 + var unknownRefs, mmapMarkerUnknownRefs atomic.Uint64 lastSeq, lastOff := lastMmapRef.Unpack() // Start workers that each process samples for a partition of the series ID space. 
@@ -593,9 +593,13 @@ func (h *Head) loadWbl(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H continue } + if r, ok := multiRef[rm.Ref]; ok { + rm.Ref = r + } + ms := h.series.getByID(rm.Ref) if ms == nil { - unknownRefs.Inc() + mmapMarkerUnknownRefs.Inc() continue } @@ -635,8 +639,8 @@ func (h *Head) loadWbl(r *wal.Reader, multiRef map[chunks.HeadSeriesRef]chunks.H return errors.Wrap(r.Err(), "read records") } - if unknownRefs.Load() > 0 { - level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load()) + if unknownRefs.Load() > 0 || mmapMarkerUnknownRefs.Load() > 0 { + level.Warn(h.logger).Log("msg", "Unknown series references for ooo WAL replay", "samples", unknownRefs.Load(), "mmap_markers", mmapMarkerUnknownRefs.Load()) } return nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 376c1334ef..26643293b5 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -717,7 +717,7 @@ github.com/prometheus/node_exporter/https github.com/prometheus/procfs github.com/prometheus/procfs/internal/fs github.com/prometheus/procfs/internal/util -# github.com/prometheus/prometheus v1.8.2-0.20220308163432-03831554a519 => github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c +# github.com/prometheus/prometheus v1.8.2-0.20220308163432-03831554a519 => github.com/grafana/mimir-prometheus v0.0.0-20220627145625-5e8406a1d4a5 ## explicit; go 1.17 github.com/prometheus/prometheus/config github.com/prometheus/prometheus/discovery @@ -1227,7 +1227,7 @@ gopkg.in/yaml.v2 gopkg.in/yaml.v3 # git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999 # github.com/bradfitz/gomemcache => github.com/themihai/gomemcache v0.0.0-20180902122335-24332e2d58ab -# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220624104020-1446b53d874c +# github.com/prometheus/prometheus => github.com/grafana/mimir-prometheus v0.0.0-20220627145625-5e8406a1d4a5 # github.com/thanos-io/thanos => github.com/grafana/thanos v0.19.1-0.20220610094531-ab07eb568317 # github.com/hashicorp/go-immutable-radix => github.com/hashicorp/go-immutable-radix v1.2.0 # github.com/hashicorp/go-hclog => github.com/hashicorp/go-hclog v0.12.2 From 5a0bbd66a6709e93e63b8ce0362b3e41c9e50aec Mon Sep 17 00:00:00 2001 From: Jesus Vazquez Date: Mon, 27 Jun 2022 20:43:37 +0200 Subject: [PATCH 54/63] Mimir documentation about out-of-order (#2183) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Mimir documentation about out-of-order Out of order support is coming as an experimental feature in Mimir 2.2.0. This commit adds the main documentation files about the feature. 
* Remove ingester entry from CHANGELOG * Accept suggestion * Address suggestions and fill the configuration * Address Ursula's feedback Co-authored-by: Ursula Kallio * Update ingester WBL doc * Address Peter's suggestions Co-authored-by: Peter Štibraný * Remove prettier wraps * Apply suggestions from code review Co-authored-by: Peter Štibraný Co-authored-by: Marco Pracucci * Add clarification note * Other suggestions * Mauro's suggestion Co-authored-by: Mauro Stettler Co-authored-by: Ursula Kallio Co-authored-by: Peter Štibraný Co-authored-by: Marco Pracucci Co-authored-by: Mauro Stettler --- .../architecture/components/ingester.md | 22 +++++++-- .../configuring/about-versioning.md | 1 + ...figuring-out-of-order-samples-ingestion.md | 48 +++++++++++++++++++ 3 files changed, 68 insertions(+), 3 deletions(-) create mode 100644 docs/sources/operators-guide/configuring/configuring-out-of-order-samples-ingestion.md diff --git a/docs/sources/operators-guide/architecture/components/ingester.md b/docs/sources/operators-guide/architecture/components/ingester.md index 1eabc32cc9..0affe789dc 100644 --- a/docs/sources/operators-guide/architecture/components/ingester.md +++ b/docs/sources/operators-guide/architecture/components/ingester.md @@ -42,10 +42,11 @@ Write de-amplification is the main source of Mimir's low total cost of ownership ## Ingesters failure and data loss If an ingester process crashes or exits abruptly, all the in-memory series that have not yet been uploaded to the long-term storage could be lost. -There are two primary ways to mitigate this failure mode: +There are the following ways to mitigate this failure mode: -1. Replication -2. Write-ahead log (WAL) +- Replication +- Write-ahead log (WAL) +- Write-behind log (WBL), only used if out-of-order ingestion is enabled. ### Replication @@ -63,6 +64,15 @@ If an ingester fails, a subsequent process restart replays the WAL and recovers Contrary to the sole replication, and given that the persistent disk data is not lost, in the event of the failure of multiple ingesters, each ingester recovers the in-memory series samples from WAL after a subsequent restart. Replication is still recommended in order to gracefully handle a single ingester failure. +### Write-behind log + +The write-behind log (WBL) is similar to the WAL, but it only writes incoming out-of-order samples to a persistent disk until the series are uploaded to long-term storage. + +There is a different log for this because it is not possible to know if a sample is out-of-order until Mimir tries to append it. +First Mimir needs to attempt to append it, the TSDB will detect that it is out-of-order, append it anyway if out-of-order is enabled and then write it to the log. + +If the ingesters fail, the same characteristics as in the WAL apply. + ## Zone aware replication Zone aware replication ensures that the ingester replicas for a given time series are divided across different zones. @@ -76,3 +86,9 @@ To set up multi-zone replication, refer to [Configuring zone-aware replication]( Shuffle sharding can be used to reduce the effect that multiple tenants can have on each other. For more information on shuffle sharding, refer to [Configuring shuffle sharding]({{< relref "../../configuring/configuring-shuffle-sharding/index.md" >}}). + +## Out-of-order samples ingestion + +Out-of-order samples are discarded by default. If the system writing samples to Mimir produces out-of-order samples, you can enable ingestion of such samples. 
+ +For more information about out-of-order samples ingestion, refer to [Configuring out of order samples ingestion]({{< relref "../../configuring/configuring-out-of-order-samples-ingestion.md" >}}). diff --git a/docs/sources/operators-guide/configuring/about-versioning.md b/docs/sources/operators-guide/configuring/about-versioning.md index c113726457..ed0272de3a 100644 --- a/docs/sources/operators-guide/configuring/about-versioning.md +++ b/docs/sources/operators-guide/configuring/about-versioning.md @@ -76,6 +76,7 @@ The following features are currently experimental: - Add variance to chunks end time to spread writing across time (`-blocks-storage.tsdb.head-chunks-end-time-variance`) - Using queue and asynchronous chunks disk mapper (`-blocks-storage.tsdb.head-chunks-write-queue-size`) - Snapshotting of in-memory TSDB data on disk when shutting down (`-blocks-storage.tsdb.memory-snapshot-on-shutdown`) + - Out-of-order samples ingestion (`-ingester.out-of-order-allowance`) - Query-frontend - `-query-frontend.querier-forget-delay` - Query-scheduler diff --git a/docs/sources/operators-guide/configuring/configuring-out-of-order-samples-ingestion.md b/docs/sources/operators-guide/configuring/configuring-out-of-order-samples-ingestion.md new file mode 100644 index 0000000000..495c363bca --- /dev/null +++ b/docs/sources/operators-guide/configuring/configuring-out-of-order-samples-ingestion.md @@ -0,0 +1,48 @@ +--- +title: "Configuring out-of-order samples ingestion" +menuTitle: "Configuring out-of-order samples ingestion" +description: "Learn how to configure Grafana Mimir to handle out-of-order samples ingestion." +weight: 120 +--- + +# Configuring out-of-order samples ingestion + +Grafana Mimir and the Prometheus TSDB understand out-of-order as follows. + +The moment that a new series sample arrives, Mimir need to determine if the series already exists, and whether or not the sample is too old: + +- If the series exists, the incoming sample must have a newer timestamp than the latest sample that is stored for the series. + Otherwise, it is considered out-of-order and will be dropped by the ingesters. +- If the series does not exist, then the sample has to be within bounds, which go back 1 hour from TSDB's head-block max time (when using 2 hour block range). If it fails to be within bounds, then it is also considered out-of-bounds and will be dropped by the ingesters. + +> **Note:** If you're writing metrics using Prometheus remote write or the Grafana Agent, then out-of-order samples are unexpected. +> Prometheus and Grafana Agent guarantee that samples are written in-order for the same series. + +If you have out-of-order samples due to the nature of your architecture or the system that is being observed, then you can configure Grafana Mimir to set an out-of-order time-window threshold for how old samples can be ingested. +As a result, none of the preceding samples will be dropped if they are within the configured time window. + +## Configuring out-of-order samples ingestion instance wide + +To configure Grafana Mimir to accept out-of-order samples, see the following configuration snippet: + +```yaml +limits: + # Allow ingestion of out-of-order samples up to 5 minutes since the latest received sample for the series. + out_of_order_time_window: 5m +``` + +## Configure out-of-order samples per tenant + +If your Grafana Mimir has multitenancy enabled, you can still use the preceding method to set a default out-of-order time window threshold for all tenants. 
+If a particular tenant needs a custom threshold, you can use the runtime configuration to set a per-tenant override. + +1. Enable [runtime configuration]({{< relref "about-runtime-configuration.md" >}}). +1. Add an override for the tenant that needs a custom out-of-order time window: + +```yaml +overrides: + tenant1: + out_of_order_time_window: 2h + tenant2: + out_of_order_time_window: 30m +``` From 9914a64126abc58b7368b1cfb08f5cabd63968c8 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 28 Jun 2022 09:41:45 +0200 Subject: [PATCH 55/63] Helm: metamonitor naming (#2236) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Helm: Move serviceMonitor to metaMonitoring In values.yaml, move serviceMonitor to metaMonitoring.serviceMonitor. The reason is to group everything related to metaMonitoring and also avoid the impression that this serviceMonitor is somehow processed by Mimir. Signed-off-by: György Krajcsovits --- .../charts/mimir-distributed/CHANGELOG.md | 1 + .../templates/lib/service-monitor.tpl | 2 +- .../kube-state-metrics-servmon.yaml | 2 +- .../kubelet-cadvisor-servmon.yaml | 2 +- .../mimir-distributed/templates/validate.yaml | 4 +- .../helm/charts/mimir-distributed/values.yaml | 61 ++++++++++--------- 6 files changed, 37 insertions(+), 35 deletions(-) diff --git a/operations/helm/charts/mimir-distributed/CHANGELOG.md b/operations/helm/charts/mimir-distributed/CHANGELOG.md index 35bfb6bec2..423fb1e9a6 100644 --- a/operations/helm/charts/mimir-distributed/CHANGELOG.md +++ b/operations/helm/charts/mimir-distributed/CHANGELOG.md @@ -13,6 +13,7 @@ Entries should include a reference to the Pull Request that introduced the chang ## main / unreleased * [CHANGE] Enable multi-tenancy by default. This means `multitenancy_enabled` is now `true` for both Mimir and Enterprise Metrics. Nginx will inject `X-Scope-OrgID=anonymous` header if the header is not present, ensuring backwards compatibility. #2117 +* [CHANGE] **breaking change** The value `serviceMonitor` and everything under it is moved to `metaMonitoring.serviceMonitor` to group all meta-monitoring settings under one section. #2236 * [CHANGE] **breaking change** Chart now uses custom memcached templates to remove bitnami dependency. There are changes to the Helm values, listed bellow. #2064 - The `memcached` section now contains common values shared across all memcached instances. - New `memcachedExporter` section was added to configure memcached metrics exporter. 
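The `[CHANGE]` entry above is easiest to read as a values-file migration. A minimal, hypothetical before/after sketch for chart users follows (only the nesting changes; the individual serviceMonitor keys keep their existing names and defaults):

```yaml
# Before: hypothetical user-supplied values for the chart
serviceMonitor:
  enabled: true
  interval: 60s

# After: the same settings move under metaMonitoring
metaMonitoring:
  serviceMonitor:
    enabled: true
    interval: 60s
```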
diff --git a/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl b/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl index 3357d95dfa..f83ebea070 100644 --- a/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl +++ b/operations/helm/charts/mimir-distributed/templates/lib/service-monitor.tpl @@ -6,7 +6,7 @@ Params: memberlist = true/false, whether component is part of memberlist */}} {{- define "mimir.lib.serviceMonitor" -}} -{{- with .ctx.Values.serviceMonitor }} +{{- with .ctx.Values.metaMonitoring.serviceMonitor }} {{- if .enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml index 145c101fcf..02eb4cf985 100644 --- a/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kube-state-metrics-servmon.yaml @@ -1,5 +1,5 @@ {{- if and ((.Values.metaMonitoring).grafanaAgent).enabled ((((.Values.metaMonitoring).grafanaAgent).metrics).scrapeK8s).enabled }} -{{- with .Values.serviceMonitor }} +{{- with .Values.metaMonitoring.serviceMonitor }} {{- if .enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor diff --git a/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml index d6bd60de21..fb0671dd68 100644 --- a/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml +++ b/operations/helm/charts/mimir-distributed/templates/metamonitoring/kubelet-cadvisor-servmon.yaml @@ -1,5 +1,5 @@ {{- if and ((.Values.metaMonitoring).grafanaAgent).enabled ((((.Values.metaMonitoring).grafanaAgent).metrics).scrapeK8s).enabled }} -{{- with .Values.serviceMonitor }} +{{- with .Values.metaMonitoring.serviceMonitor }} {{- if .enabled }} apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor diff --git a/operations/helm/charts/mimir-distributed/templates/validate.yaml b/operations/helm/charts/mimir-distributed/templates/validate.yaml index abefe109fd..05c91a0184 100644 --- a/operations/helm/charts/mimir-distributed/templates/validate.yaml +++ b/operations/helm/charts/mimir-distributed/templates/validate.yaml @@ -31,7 +31,7 @@ && (remote.url != "" || (len(additionalConfigs) > 0 && additionalConfigs.url != "")) */}} {{- if and - (not (($.Values).serviceMonitor).enabled) + (not ((($.Values).metaMonitoring).serviceMonitor).enabled) .enabled (or (not (empty ((.metrics).remote).url)) @@ -41,6 +41,6 @@ ) ) }} -{{- fail "metaMonitoring.grafanaAgent.remote.url is set, but serivceMonitor is disabled; you will not see any metrics, so enable the serviceMOnitor or remove the remote configuration" }} +{{- fail "metaMonitoring.grafanaAgent.remote.url is set, but metaMonitoring.serviceMonitor is disabled; you will not see any metrics, so enable the metaMonitoring.serviceMonitor or remove the remote configuration" }} {{- end }} {{- end }} diff --git a/operations/helm/charts/mimir-distributed/values.yaml b/operations/helm/charts/mimir-distributed/values.yaml index d0d221c657..071968a344 100644 --- a/operations/helm/charts/mimir-distributed/values.yaml +++ b/operations/helm/charts/mimir-distributed/values.yaml @@ -263,36 +263,6 @@ 
runtimeConfig: {} rbac: pspEnabled: true -# ServiceMonitor configuration -serviceMonitor: - # -- If enabled, ServiceMonitor resources for Prometheus Operator are created - enabled: false - # -- To disable setting a 'cluster' label in metrics, set to 'null'. - # To overwrite the 'cluster' label with your own value, set to a non-empty string. - # Keep empty string "" to have the default value in the 'cluster' label, which is the helm release name for Mimir and the actual cluster name for Enterprise Metrics. - clusterLabel: "" - # -- Alternative namespace for ServiceMonitor resources - # If left unset, the default is to install the ServiceMonitor resources in the namespace where the chart is installed, i.e. the namespace specified for the helm command. - namespace: null - # -- Namespace selector for ServiceMonitor resources - # If left unset, the default is to select the namespace where the chart is installed, i.e. the namespace specified for the helm command. - namespaceSelector: null - # -- ServiceMonitor annotations - annotations: {} - # -- Additional ServiceMonitor labels - labels: {} - # -- ServiceMonitor scrape interval - interval: null - # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s) - scrapeTimeout: null - # -- ServiceMonitor relabel configs to apply to samples before scraping - # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - relabelings: [] - # -- ServiceMonitor will use http by default, but you can pick https as well - scheme: http - # -- ServiceMonitor will use these tlsConfig settings to make the health check requests - tlsConfig: null - alertmanager: enabled: true replicas: 1 @@ -1802,6 +1772,37 @@ smoke_test: initContainers: [] metaMonitoring: + # ServiceMonitor configuration for monitoring Kubernetes Services with Prometheus Operator and/or Grafana Agent + serviceMonitor: + # -- If enabled, ServiceMonitor resources for Prometheus Operator are created + enabled: false + # -- To disable setting a 'cluster' label in metrics, set to 'null'. + # To overwrite the 'cluster' label with your own value, set to a non-empty string. + # Keep empty string "" to have the default value in the 'cluster' label, which is the helm release name for Mimir and the actual cluster name for Enterprise Metrics. + clusterLabel: "" + # -- Alternative namespace for ServiceMonitor resources + # If left unset, the default is to install the ServiceMonitor resources in the namespace where the chart is installed, i.e. the namespace specified for the helm command. + namespace: null + # -- Namespace selector for ServiceMonitor resources + # If left unset, the default is to select the namespace where the chart is installed, i.e. the namespace specified for the helm command. + namespaceSelector: null + # -- ServiceMonitor annotations + annotations: {} + # -- Additional ServiceMonitor labels + labels: {} + # -- ServiceMonitor scrape interval + interval: null + # -- ServiceMonitor scrape timeout in Go duration format (e.g. 
15s) + scrapeTimeout: null + # -- ServiceMonitor relabel configs to apply to samples before scraping + # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + relabelings: [] + # -- ServiceMonitor will use http by default, but you can pick https as well + scheme: http + # -- ServiceMonitor will use these tlsConfig settings to make the health check requests + tlsConfig: null + + # metaMonitoringAgent configures the built in Grafana Agent that can scrape metrics and logs and send them to a local or remote destination grafanaAgent: # -- Controls whether to create PodLogs, MetricsInstance, LogsInstance, and GrafanaAgent CRs to scrape the # ServiceMonitors of the chart and ship metrics and logs to the remote endpoints below. From 335daafa27f0479ba73c259caaeee44f572e4b88 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 28 Jun 2022 09:59:05 +0200 Subject: [PATCH 56/63] Jsonnet: Configure ingester max global metadata per user and per metric (#2250) * operations/mimir: Configure -ingester.max-global-metadata-per-{user,metric} Signed-off-by: Arve Knudsen * Update CHANGELOG.md Co-authored-by: Marco Pracucci * operations/mimir: Fix tests Signed-off-by: Arve Knudsen Co-authored-by: Marco Pracucci --- CHANGELOG.md | 2 ++ .../mimir-tests/test-autoscaling-generated.yaml | 2 ++ .../mimir-tests/test-consul-generated.yaml | 2 ++ .../test-consul-multi-zone-generated.yaml | 6 ++++++ .../test-consul-ruler-disabled-generated.yaml | 2 ++ .../mimir-tests/test-defaults-generated.yaml | 2 ++ .../test-disable-chunk-streaming-generated.yaml | 2 ++ ...erlist-migration-step-0-before-generated.yaml | 2 ++ ...st-memberlist-migration-step-1-generated.yaml | 2 ++ ...st-memberlist-migration-step-2-generated.yaml | 2 ++ ...st-memberlist-migration-step-3-generated.yaml | 2 ++ ...st-memberlist-migration-step-4-generated.yaml | 2 ++ ...st-memberlist-migration-step-5-generated.yaml | 2 ++ ...berlist-migration-step-6-final-generated.yaml | 2 ++ .../mimir-tests/test-multi-zone-generated.yaml | 6 ++++++ ...ti-zone-with-ongoing-migration-generated.yaml | 8 ++++++++ .../test-query-sharding-generated.yaml | 2 ++ .../test-ruler-remote-evaluation-generated.yaml | 2 ++ ...er-remote-evaluation-migration-generated.yaml | 2 ++ .../test-shuffle-sharding-generated.yaml | 2 ++ ...le-sharding-read-path-disabled-generated.yaml | 2 ++ .../test-storage-azure-generated.yaml | 2 ++ .../mimir-tests/test-storage-gcs-generated.yaml | 2 ++ .../mimir-tests/test-storage-s3-generated.yaml | 2 ++ operations/mimir/config.libsonnet | 16 ++++++++++++++++ 25 files changed, 78 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 3a8a3af19a..5b1ebbf8d8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -82,6 +82,8 @@ * [CHANGE] Change default value for `-blocks-storage.bucket-store.chunks-cache.memcached.timeout` to `450ms` to increase use of cached data. #2035 * [CHANGE] The `memberlist_ring_enabled` configuration now applies to Alertmanager. #2102 * [CHANGE] Default value for `memberlist_ring_enabled` is now true. It means that all hash rings use Memberlist as default KV store instead of Consul (previous default). #2161 +* [CHANGE] Configure `-ingester.max-global-metadata-per-user` to correspond to 20% of the configured max number of series per tenant. #2250 +* [CHANGE] Configure `-ingester.max-global-metadata-per-metric` to be 10. #2250 * [FEATURE] Added querier autoscaling support. 
It requires [KEDA](https://keda.sh) installed in the Kubernetes cluster and query-scheduler enabled in the Mimir cluster. Querier autoscaler can be enabled and configure through the following options in the jsonnet config: #2013 #2023 * `autoscaling_querier_enabled`: `true` to enable autoscaling. * `autoscaling_querier_min_replicas`: minimum number of querier replicas. diff --git a/operations/mimir-tests/test-autoscaling-generated.yaml b/operations/mimir-tests/test-autoscaling-generated.yaml index 02394993f4..04527727f7 100644 --- a/operations/mimir-tests/test-autoscaling-generated.yaml +++ b/operations/mimir-tests/test-autoscaling-generated.yaml @@ -988,6 +988,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-consul-generated.yaml b/operations/mimir-tests/test-consul-generated.yaml index e64d662b4d..33aadbba9f 100644 --- a/operations/mimir-tests/test-consul-generated.yaml +++ b/operations/mimir-tests/test-consul-generated.yaml @@ -1347,6 +1347,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-consul-multi-zone-generated.yaml b/operations/mimir-tests/test-consul-multi-zone-generated.yaml index 378afdf269..9634b0d118 100644 --- a/operations/mimir-tests/test-consul-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-consul-multi-zone-generated.yaml @@ -1558,6 +1558,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 @@ -1670,6 +1672,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 @@ -1782,6 +1786,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml b/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml index f81b267747..2d2a310684 100644 --- a/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml +++ 
b/operations/mimir-tests/test-consul-ruler-disabled-generated.yaml @@ -1237,6 +1237,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-defaults-generated.yaml b/operations/mimir-tests/test-defaults-generated.yaml index 8d106ffedc..06e025029d 100644 --- a/operations/mimir-tests/test-defaults-generated.yaml +++ b/operations/mimir-tests/test-defaults-generated.yaml @@ -655,6 +655,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml b/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml index 584621f306..f36d930466 100644 --- a/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml +++ b/operations/mimir-tests/test-disable-chunk-streaming-generated.yaml @@ -990,6 +990,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml index e64d662b4d..33aadbba9f 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-0-before-generated.yaml @@ -1347,6 +1347,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml index f3f47d664d..d88618fd67 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-1-generated.yaml @@ -1425,6 +1425,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml index 
c3d8de22bc..5230a3a457 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-2-generated.yaml @@ -1425,6 +1425,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml index 4bd9945b84..bc6323fc06 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-3-generated.yaml @@ -1425,6 +1425,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml index 8cfe3a0da9..72c6bd1506 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-4-generated.yaml @@ -1425,6 +1425,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.consul.hostname=consul.default.svc.cluster.local:8500 diff --git a/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml index 2efbaee732..862f8f2870 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-5-generated.yaml @@ -992,6 +992,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml index f2fb7d9303..4c370e0ba4 100644 --- a/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml +++ b/operations/mimir-tests/test-memberlist-migration-step-6-final-generated.yaml @@ -989,6 +989,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - 
-ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-multi-zone-generated.yaml b/operations/mimir-tests/test-multi-zone-generated.yaml index a3413ec0bc..0c43164e6d 100644 --- a/operations/mimir-tests/test-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-generated.yaml @@ -1212,6 +1212,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s @@ -1328,6 +1330,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s @@ -1444,6 +1448,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml index b7bac675a6..7f99fc7432 100644 --- a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml @@ -1269,6 +1269,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s @@ -1383,6 +1385,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s @@ -1499,6 +1503,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s @@ -1615,6 +1621,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-query-sharding-generated.yaml b/operations/mimir-tests/test-query-sharding-generated.yaml index 725554697c..8a9cd0c173 100644 --- 
a/operations/mimir-tests/test-query-sharding-generated.yaml +++ b/operations/mimir-tests/test-query-sharding-generated.yaml @@ -994,6 +994,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml index 9ea97b05c3..b83728ea24 100644 --- a/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml +++ b/operations/mimir-tests/test-ruler-remote-evaluation-generated.yaml @@ -1292,6 +1292,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml index e29d854f01..caaa9342ad 100644 --- a/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml +++ b/operations/mimir-tests/test-ruler-remote-evaluation-migration-generated.yaml @@ -1291,6 +1291,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-shuffle-sharding-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-generated.yaml index a98010d6ff..3f30e2dc12 100644 --- a/operations/mimir-tests/test-shuffle-sharding-generated.yaml +++ b/operations/mimir-tests/test-shuffle-sharding-generated.yaml @@ -998,6 +998,8 @@ spec: - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true - -distributor.ingestion-tenant-shard-size=3 + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml index 2db4ec5519..5ee3a508d8 100644 --- a/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml +++ b/operations/mimir-tests/test-shuffle-sharding-read-path-disabled-generated.yaml @@ -999,6 +999,8 @@ spec: - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true - -distributor.ingestion-tenant-shard-size=3 + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-storage-azure-generated.yaml b/operations/mimir-tests/test-storage-azure-generated.yaml index 
7986937149..8c640eb1c8 100644 --- a/operations/mimir-tests/test-storage-azure-generated.yaml +++ b/operations/mimir-tests/test-storage-azure-generated.yaml @@ -1001,6 +1001,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-storage-gcs-generated.yaml b/operations/mimir-tests/test-storage-gcs-generated.yaml index f2fb7d9303..4c370e0ba4 100644 --- a/operations/mimir-tests/test-storage-gcs-generated.yaml +++ b/operations/mimir-tests/test-storage-gcs-generated.yaml @@ -989,6 +989,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir-tests/test-storage-s3-generated.yaml b/operations/mimir-tests/test-storage-s3-generated.yaml index 9e5cf03d52..2f5ceac86b 100644 --- a/operations/mimir-tests/test-storage-s3-generated.yaml +++ b/operations/mimir-tests/test-storage-s3-generated.yaml @@ -996,6 +996,8 @@ spec: - -blocks-storage.tsdb.dir=/data/tsdb - -blocks-storage.tsdb.ship-interval=1m - -distributor.health-check-ingesters=true + - -ingester.max-global-metadata-per-metric=10 + - -ingester.max-global-metadata-per-user=30000 - -ingester.max-global-series-per-metric=20000 - -ingester.max-global-series-per-user=150000 - -ingester.ring.heartbeat-period=15s diff --git a/operations/mimir/config.libsonnet b/operations/mimir/config.libsonnet index 31fde0bcf9..53eeba7076 100644 --- a/operations/mimir/config.libsonnet +++ b/operations/mimir/config.libsonnet @@ -228,6 +228,8 @@ ingesterLimitsConfig: { 'ingester.max-global-series-per-user': $._config.limits.max_global_series_per_user, 'ingester.max-global-series-per-metric': $._config.limits.max_global_series_per_metric, + 'ingester.max-global-metadata-per-user': $._config.limits.max_global_metadata_per_user, + 'ingester.max-global-metadata-per-metric': $._config.limits.max_global_metadata_per_metric, }, rulerLimitsConfig: { 'ruler.max-rules-per-rule-group': $._config.limits.ruler_max_rules_per_rule_group, @@ -247,6 +249,8 @@ // Our limit should be 100k, but we need some room of about ~50% to take rollouts into account max_global_series_per_user: 150000, max_global_series_per_metric: 20000, + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 10000, ingestion_burst_size: 200000, @@ -262,6 +266,8 @@ medium_small_user:: { max_global_series_per_user: 300000, max_global_series_per_metric: 30000, + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 30000, ingestion_burst_size: 300000, @@ -274,6 +280,8 @@ small_user:: { max_global_series_per_user: 1000000, max_global_series_per_metric: 100000, + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 100000, ingestion_burst_size: 1000000, @@ -286,6 +294,8 @@ medium_user:: { max_global_series_per_user: 3000000, 
// 3M max_global_series_per_metric: 300000, // 300K + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 350000, // 350K ingestion_burst_size: 3500000, // 3.5M @@ -298,6 +308,8 @@ big_user:: { max_global_series_per_user: 6000000, // 6M max_global_series_per_metric: 600000, // 600K + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 700000, // 700K ingestion_burst_size: 7000000, // 7M @@ -310,6 +322,8 @@ super_user:: { max_global_series_per_user: 12000000, // 12M max_global_series_per_metric: 1200000, // 1.2M + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 1500000, // 1.5M ingestion_burst_size: 15000000, // 15M @@ -327,6 +341,8 @@ mega_user+:: { max_global_series_per_user: 16000000, // 16M max_global_series_per_metric: 1600000, // 1.6M + max_global_metadata_per_user: std.ceil(self.max_global_series_per_user * 0.2), + max_global_metadata_per_metric: 10, ingestion_rate: 2250000, // 2.25M ingestion_burst_size: 22500000, // 22.5M From b3b7c6db6dddb2c23a2986d228e31ae1c7131ef4 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 28 Jun 2022 10:08:42 +0200 Subject: [PATCH 57/63] Helm: weekly release (#2252) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: György Krajcsovits --- operations/helm/charts/mimir-distributed/Chart.yaml | 2 +- operations/helm/charts/mimir-distributed/README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/operations/helm/charts/mimir-distributed/Chart.yaml b/operations/helm/charts/mimir-distributed/Chart.yaml index e2bf08a961..9809efcc59 100644 --- a/operations/helm/charts/mimir-distributed/Chart.yaml +++ b/operations/helm/charts/mimir-distributed/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 -version: 2.2.0-weekly.191 +version: 2.2.0-weekly.192 appVersion: 2.1.0 description: "Grafana Mimir" home: https://grafana.com/docs/mimir/v2.1.x/ diff --git a/operations/helm/charts/mimir-distributed/README.md b/operations/helm/charts/mimir-distributed/README.md index b342f63153..76a8c9cdf3 100644 --- a/operations/helm/charts/mimir-distributed/README.md +++ b/operations/helm/charts/mimir-distributed/README.md @@ -4,7 +4,7 @@ Helm chart for deploying [Grafana Mimir](https://grafana.com/docs/mimir/v2.1.x/) # mimir-distributed -![Version: 2.2.0-weekly.191](https://img.shields.io/badge/Version-2.2.0--weekly.191-informational?style=flat-square) ![AppVersion: 2.1.0](https://img.shields.io/badge/AppVersion-2.1.0-informational?style=flat-square) +![Version: 2.2.0-weekly.192](https://img.shields.io/badge/Version-2.2.0--weekly.192-informational?style=flat-square) ![AppVersion: 2.1.0](https://img.shields.io/badge/AppVersion-2.1.0-informational?style=flat-square) Grafana Mimir From c33373665435f5fa7110d8b6625e36a059489b66 Mon Sep 17 00:00:00 2001 From: Arve Knudsen Date: Tue, 28 Jun 2022 10:15:14 +0200 Subject: [PATCH 58/63] operations/mimir: Change multi_zone_ingester_max_unavailable to 25 (#2251) * operations/mimir: Change multi_zone_ingester_max_unavailable to 25 Signed-off-by: Arve Knudsen * operations/mimir: Fix tests Signed-off-by: Arve Knudsen --- CHANGELOG.md | 1 + .../mimir-tests/test-consul-multi-zone-generated.yaml | 6 +++--- operations/mimir-tests/test-multi-zone-generated.yaml | 6 +++--- .../test-multi-zone-with-ongoing-migration-generated.yaml | 6 
+++--- operations/mimir/multi-zone.libsonnet | 2 +- 5 files changed, 11 insertions(+), 10 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5b1ebbf8d8..1fb1fb7858 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -84,6 +84,7 @@ * [CHANGE] Default value for `memberlist_ring_enabled` is now true. It means that all hash rings use Memberlist as default KV store instead of Consul (previous default). #2161 * [CHANGE] Configure `-ingester.max-global-metadata-per-user` to correspond to 20% of the configured max number of series per tenant. #2250 * [CHANGE] Configure `-ingester.max-global-metadata-per-metric` to be 10. #2250 +* [CHANGE] Change `_config.multi_zone_ingester_max_unavailable` to 25. #2251 * [FEATURE] Added querier autoscaling support. It requires [KEDA](https://keda.sh) installed in the Kubernetes cluster and query-scheduler enabled in the Mimir cluster. Querier autoscaler can be enabled and configure through the following options in the jsonnet config: #2013 #2023 * `autoscaling_querier_enabled`: `true` to enable autoscaling. * `autoscaling_querier_min_replicas`: minimum number of querier replicas. diff --git a/operations/mimir-tests/test-consul-multi-zone-generated.yaml b/operations/mimir-tests/test-consul-multi-zone-generated.yaml index 9634b0d118..4605cabad9 100644 --- a/operations/mimir-tests/test-consul-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-consul-multi-zone-generated.yaml @@ -1517,7 +1517,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-a @@ -1631,7 +1631,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-b @@ -1745,7 +1745,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-c diff --git a/operations/mimir-tests/test-multi-zone-generated.yaml b/operations/mimir-tests/test-multi-zone-generated.yaml index 0c43164e6d..d18f19e4f0 100644 --- a/operations/mimir-tests/test-multi-zone-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-generated.yaml @@ -1170,7 +1170,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-a @@ -1288,7 +1288,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-b @@ -1406,7 +1406,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-c diff --git a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml index 7f99fc7432..aae43af816 100644 --- a/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml +++ b/operations/mimir-tests/test-multi-zone-with-ongoing-migration-generated.yaml @@ -1343,7 +1343,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-a @@ -1461,7 +1461,7 @@ apiVersion: apps/v1 kind: StatefulSet 
metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-b @@ -1579,7 +1579,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: annotations: - rollout-max-unavailable: "10" + rollout-max-unavailable: "25" labels: rollout-group: ingester name: ingester-zone-c diff --git a/operations/mimir/multi-zone.libsonnet b/operations/mimir/multi-zone.libsonnet index 5cdca9be8a..694636623d 100644 --- a/operations/mimir/multi-zone.libsonnet +++ b/operations/mimir/multi-zone.libsonnet @@ -18,7 +18,7 @@ multi_zone_ingester_replication_write_path_enabled: true, multi_zone_ingester_replication_read_path_enabled: true, multi_zone_ingester_replicas: 0, - multi_zone_ingester_max_unavailable: 10, + multi_zone_ingester_max_unavailable: 25, multi_zone_store_gateway_enabled: false, multi_zone_store_gateway_read_path_enabled: $._config.multi_zone_store_gateway_enabled, From 293fc2eb23ccd3e97a955f4a9557d7c009de5668 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 28 Jun 2022 10:41:03 +0200 Subject: [PATCH 59/63] Removed migration of alertmanager local state files from old hierarchy (Cortex 1.8 and earlier) (#2253) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Removed migration of alertmanager local state files from old hierarchy (Cortex 1.8 and earlier). Signed-off-by: Peter Štibraný * CHANGELOG.md update. Signed-off-by: Peter Štibraný --- CHANGELOG.md | 2 +- pkg/alertmanager/multitenant.go | 119 --------------------------- pkg/alertmanager/multitenant_test.go | 33 -------- 3 files changed, 1 insertion(+), 153 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 1fb1fb7858..718d084792 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -8,7 +8,7 @@ * [CHANGE] Default values have changed for the following settings. This improves query performance for recent data (within 12h) by only reading from ingesters: #1909 #1921 - `-blocks-storage.bucket-store.ignore-blocks-within` now defaults to `10h` (previously `0`) - `-querier.query-store-after` now defaults to `12h` (previously `0`) - +* [CHANGE] Alertmanager: removed support for migrating local files from Cortex 1.8 or earlier. Related to original Cortex PR https://github.com/cortexproject/cortex/pull/3910. 
#2253 * [CHANGE] The following settings are now classified as advanced because the defaults should work for most users and tuning them requires in-depth knowledge of how the read path works: #1929 - `-querier.query-ingesters-within` - `-querier.query-store-after` diff --git a/pkg/alertmanager/multitenant.go b/pkg/alertmanager/multitenant.go index 48ddd8b3cd..8a634520f0 100644 --- a/pkg/alertmanager/multitenant.go +++ b/pkg/alertmanager/multitenant.go @@ -30,7 +30,6 @@ import ( amconfig "github.com/prometheus/alertmanager/config" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" - tsdb_errors "github.com/prometheus/prometheus/tsdb/errors" "github.com/weaveworks/common/httpgrpc" "github.com/weaveworks/common/httpgrpc/server" "github.com/weaveworks/common/user" @@ -386,11 +385,6 @@ func (h *handlerForGRPCServer) ServeHTTP(w http.ResponseWriter, req *http.Reques } func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) { - err = am.migrateStateFilesToPerTenantDirectories() - if err != nil { - return err - } - defer func() { if err == nil || am.subservices == nil { return @@ -453,119 +447,6 @@ func (am *MultitenantAlertmanager) starting(ctx context.Context) (err error) { return nil } -// migrateStateFilesToPerTenantDirectories migrates any existing configuration from old place to new hierarchy. -// TODO: Remove in Cortex 1.11. -func (am *MultitenantAlertmanager) migrateStateFilesToPerTenantDirectories() error { - migrate := func(from, to string) error { - level.Info(am.logger).Log("msg", "migrating alertmanager state", "from", from, "to", to) - err := os.Rename(from, to) - return errors.Wrapf(err, "failed to migrate alertmanager state from %v to %v", from, to) - } - - st, err := am.getObsoleteFilesPerUser() - if err != nil { - return errors.Wrap(err, "failed to migrate alertmanager state files") - } - - for userID, files := range st { - tenantDir := am.getTenantDirectory(userID) - err := os.MkdirAll(tenantDir, 0777) - if err != nil { - return errors.Wrapf(err, "failed to create per-tenant directory %v", tenantDir) - } - - errs := tsdb_errors.NewMulti() - - if files.notificationLogSnapshot != "" { - errs.Add(migrate(files.notificationLogSnapshot, filepath.Join(tenantDir, notificationLogSnapshot))) - } - - if files.silencesSnapshot != "" { - errs.Add(migrate(files.silencesSnapshot, filepath.Join(tenantDir, silencesSnapshot))) - } - - if files.templatesDir != "" { - errs.Add(migrate(files.templatesDir, filepath.Join(tenantDir, templatesDir))) - } - - if err := errs.Err(); err != nil { - return err - } - } - return nil -} - -type obsoleteStateFiles struct { - notificationLogSnapshot string - silencesSnapshot string - templatesDir string -} - -// getObsoleteFilesPerUser returns per-user set of files that should be migrated from old structure to new structure. -func (am *MultitenantAlertmanager) getObsoleteFilesPerUser() (map[string]obsoleteStateFiles, error) { - files, err := ioutil.ReadDir(am.cfg.DataDir) - if err != nil { - return nil, errors.Wrapf(err, "failed to list dir %v", am.cfg.DataDir) - } - - // old names - const ( - notificationLogPrefix = "nflog:" - silencesPrefix = "silences:" - templates = "templates" - ) - - result := map[string]obsoleteStateFiles{} - - for _, f := range files { - fullPath := filepath.Join(am.cfg.DataDir, f.Name()) - - if f.IsDir() { - // Process templates dir. - if f.Name() != templates { - // Ignore other files -- those are likely per tenant directories. 
- continue - } - - templateDirs, err := ioutil.ReadDir(fullPath) - if err != nil { - return nil, errors.Wrapf(err, "failed to list dir %v", fullPath) - } - - // Previously templates directory contained per-tenant subdirectory. - for _, d := range templateDirs { - if d.IsDir() { - v := result[d.Name()] - v.templatesDir = filepath.Join(fullPath, d.Name()) - result[d.Name()] = v - } else { - level.Warn(am.logger).Log("msg", "ignoring unknown local file while migrating local alertmanager state files", "file", filepath.Join(fullPath, d.Name())) - } - } - continue - } - - switch { - case strings.HasPrefix(f.Name(), notificationLogPrefix): - userID := strings.TrimPrefix(f.Name(), notificationLogPrefix) - v := result[userID] - v.notificationLogSnapshot = fullPath - result[userID] = v - - case strings.HasPrefix(f.Name(), silencesPrefix): - userID := strings.TrimPrefix(f.Name(), silencesPrefix) - v := result[userID] - v.silencesSnapshot = fullPath - result[userID] = v - - default: - level.Warn(am.logger).Log("msg", "ignoring unknown local data file while migrating local alertmanager state files", "file", fullPath) - } - } - - return result, nil -} - func (am *MultitenantAlertmanager) run(ctx context.Context) error { tick := time.NewTicker(am.cfg.PollInterval) defer tick.Stop() diff --git a/pkg/alertmanager/multitenant_test.go b/pkg/alertmanager/multitenant_test.go index f6f8169764..3cce08c039 100644 --- a/pkg/alertmanager/multitenant_test.go +++ b/pkg/alertmanager/multitenant_test.go @@ -596,39 +596,6 @@ receivers: } } -func TestMultitenantAlertmanager_migrateStateFilesToPerTenantDirectories(t *testing.T) { - ctx := context.Background() - - const ( - user1 = "user1" - user2 = "user2" - ) - - store := prepareInMemoryAlertStore() - require.NoError(t, store.SetAlertConfig(ctx, alertspb.AlertConfigDesc{ - User: user2, - RawConfig: simpleConfigOne, - Templates: []*alertspb.TemplateDesc{}, - })) - - reg := prometheus.NewPedanticRegistry() - cfg := mockAlertmanagerConfig(t) - am, err := createMultitenantAlertmanager(cfg, nil, store, nil, nil, log.NewNopLogger(), reg) - require.NoError(t, err) - - createFile(t, filepath.Join(cfg.DataDir, "nflog:"+user1)) - createFile(t, filepath.Join(cfg.DataDir, "silences:"+user1)) - createFile(t, filepath.Join(cfg.DataDir, "nflog:"+user2)) - createFile(t, filepath.Join(cfg.DataDir, "templates", user2, "template.tpl")) - - require.NoError(t, am.migrateStateFilesToPerTenantDirectories()) - require.True(t, fileExists(t, filepath.Join(cfg.DataDir, user1, notificationLogSnapshot))) - require.True(t, fileExists(t, filepath.Join(cfg.DataDir, user1, silencesSnapshot))) - require.True(t, fileExists(t, filepath.Join(cfg.DataDir, user2, notificationLogSnapshot))) - require.True(t, dirExists(t, filepath.Join(cfg.DataDir, user2, templatesDir))) - require.True(t, fileExists(t, filepath.Join(cfg.DataDir, user2, templatesDir, "template.tpl"))) -} - func fileExists(t *testing.T, path string) bool { return checkExists(t, path, false) } From 0448fdd6ecd4bcf6e3f533287ceb5e43a8a7d734 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 28 Jun 2022 12:05:08 +0200 Subject: [PATCH 60/63] Signal that 2.2 release is now in progress. 
(#2254) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Peter Štibraný --- CHANGELOG.md | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 718d084792..6b580ef970 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -4,6 +4,21 @@ ### Grafana Mimir +### Mixin + +### Jsonnet + +### Mimirtool + +### Mimir Continuous Test + +### Documentation + + +## 2.2.0-rc.0 + +### Grafana Mimir + * [CHANGE] Increased default configuration for `-server.grpc-max-recv-msg-size-bytes` and `-server.grpc-max-send-msg-size-bytes` from 4MB to 100MB. #1883 * [CHANGE] Default values have changed for the following settings. This improves query performance for recent data (within 12h) by only reading from ingesters: #1909 #1921 - `-blocks-storage.bucket-store.ignore-blocks-within` now defaults to `10h` (previously `0`) @@ -122,7 +137,9 @@ * [BUGFIX] Fixed Mimir Alertmanager datasource in Grafana used by "Play with Grafana Mimir" tutorial. #2115 ## 2.1.0 + ### Grafana Mimir + * [CHANGE] Compactor: No longer upload debug meta files to object storage. #1257 * [CHANGE] Default values have changed for the following settings: #1547 - `-alertmanager.alertmanager-client.grpc-max-recv-msg-size` now defaults to 100 MiB (previously was not configurable and set to 16 MiB) From f11fd031003bba3a42fd67cfc9ca336f85197b09 Mon Sep 17 00:00:00 2001 From: George Krajcsovits Date: Tue, 28 Jun 2022 13:10:22 +0200 Subject: [PATCH 61/63] Helm: add a step to contributing doc (#2257) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This is a reminder for people how to check for new versions. Signed-off-by: György Krajcsovits --- docs/internal/contributing/contributing-to-helm-chart.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/internal/contributing/contributing-to-helm-chart.md b/docs/internal/contributing/contributing-to-helm-chart.md index 967e6b6fc9..8e9bcc945e 100644 --- a/docs/internal/contributing/contributing-to-helm-chart.md +++ b/docs/internal/contributing/contributing-to-helm-chart.md @@ -24,6 +24,8 @@ If version increase is need, the version is set in the chart itself [operations/ Once a PR that updates the chart version is merged to `main`, it takes a couple of minutes for it to be published in [https://grafana.github.io/helm-charts](https://grafana.github.io/helm-charts) Helm repository. +Update the Helm repository to refresh the list of available charts, by using the command `helm repo update`. + In order to search, template, install, upgrade, etc beta versions of charts, Helm commands require the user to specify the `--devel` flag. This means that checking for whether the beta version is published should be done with `helm search repo --devel`. ## Linting From 77eaa87d43a6753c99564b33f90c879ec38f3cba Mon Sep 17 00:00:00 2001 From: zenador Date: Tue, 28 Jun 2022 19:15:00 +0800 Subject: [PATCH 62/63] Rename codified errors to errors catalog (#2256) --- docs/sources/operators-guide/mimir-runbooks/_index.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/sources/operators-guide/mimir-runbooks/_index.md b/docs/sources/operators-guide/mimir-runbooks/_index.md index e4c976d0d9..99875decd0 100644 --- a/docs/sources/operators-guide/mimir-runbooks/_index.md +++ b/docs/sources/operators-guide/mimir-runbooks/_index.md @@ -1037,7 +1037,7 @@ How to **investigate**: 1. The alert fired because of a bug in Mimir: fix it. 1. 
The alert fired because of a bug or edge case in the continuous test tool, causing a false positive: fix it. -## Codified errors +## Errors catalog Mimir has some codified error IDs that you might see in HTTP responses or logs. These error IDs allow you to read related details in the documentation that follows. From 8fabc8351ce0bd61806a3ad22776f4a5f65a5037 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Peter=20=C5=A0tibran=C3=BD?= Date: Tue, 28 Jun 2022 17:02:25 +0200 Subject: [PATCH 63/63] Add new section on website for links to blog posts, podcasts and talks. (#2216) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add a new reference for learning resources with links to blog posts, podcasts, and videos Signed-off-by: Peter Štibraný Co-authored-by: Ursula Kallio --- .../reference-learning-resources/_index.md | 22 +++++++++++++++++++ 1 file changed, 22 insertions(+) create mode 100644 docs/sources/operators-guide/reference-learning-resources/_index.md diff --git a/docs/sources/operators-guide/reference-learning-resources/_index.md b/docs/sources/operators-guide/reference-learning-resources/_index.md new file mode 100644 index 0000000000..ad43889127 --- /dev/null +++ b/docs/sources/operators-guide/reference-learning-resources/_index.md @@ -0,0 +1,22 @@ +--- +title: "Reference: Learning resources" +menuTitle: "Reference: Learning resources" +description: "Blog posts, podcasts, and videos about Grafana Mimir" +weight: 130 +keywords: + - Grafana Mimir blog posts, podcasts, and videos +--- + +# Reference: Learning resources + +To learn more about Grafana Mimir, see the following resources: + +- June 2022 GrafanaCon 2022 talk: "[For billion-series scale or home IoT projects, get started in minutes with Grafana Mimir (using Helm)](https://grafana.com/go/grafanaconline/2022/grafana-mimir-migrate-your-metrics-in-minutes/)" +- May 2022 podcast episode: [Grafana's Big Tent podcast: Grafana Mimir: Maintainers tell all](https://bigtent.fm/4), [transcript](https://grafana.com/blog/2022/05/03/grafana-mimir-maintainers-tell-all/) +- May 2022 blog post: "[Scaling Grafana Mimir to 500 million active series on customer infrastructure with Grafana Enterprise Metrics](https://grafana.com/blog/2022/05/24/scaling-grafana-mimir-to-500-million-active-series-on-customer-infrastructure-with-grafana-enterprise-metrics/)" +- April 2022 blog post: "[How Grafana Mimir’s split-and-merge compactor enables scaling metrics to 1 billion active series](https://grafana.com/blog/2022/04/19/how-grafana-mimirs-split-and-merge-compactor-enables-scaling-metrics-to-1-billion-active-series/)" +- April 2022 video tutorial: "[How to migrate to Grafana Mimir in less than 4 minutes](https://grafana.com/blog/2022/04/25/video-how-to-migrate-to-grafana-mimir-in-less-than-4-minutes/)" +- April 2022 blog post: "[How we scaled our new Prometheus TSDB Grafana Mimir to 1 billion active series](https://grafana.com/blog/2022/04/08/how-we-scaled-our-new-prometheus-tsdb-grafana-mimir-to-1-billion-active-series/)" +- April 2022 video tutorial: "[Get started with Grafana Mimir in minutes](https://grafana.com/blog/2022/04/15/video-get-started-with-grafana-mimir-in-minutes/)" +- March 2022 blog post "[Grafana Mimir Q&A with Grafana Labs CEO Raj Dutt](https://grafana.com/blog/2022/03/30/qa-with-our-ceo-about-grafana-mimir/)" +- March 2022 blog post "[Announcing Grafana Mimir, the most scalable open source TSDB in the world](https://grafana.com/blog/2022/03/30/announcing-grafana-mimir/)"