failed to snapshot table for reads - invalid database #8660

Closed

thiDucTran opened this issue Mar 1, 2023 · 1 comment

thiDucTran commented Mar 1, 2023

Describe the bug
Repeated errors like the following appear in the Loki logs:

kubectl logs --tail=50 --follow --selector app.kubernetes.io/instance=loki | grep -i -B3 'error' | grep -i index

level=info ts=2023-02-27T06:17:19.052457169Z caller=table.go:334 msg="finished handing over table loki_index_19415"
level=error ts=2023-02-27T06:17:19.1633127Z caller=table_manager.go:179 msg="failed to snapshot table for reads" table=loki_index_19415 err="invalid database"
level=error ts=2023-02-27T06:17:25.101148875Z caller=table_manager.go:179 msg="failed to snapshot table for reads" table=loki_index_19415 err="invalid database"

To Reproduce
Steps to reproduce the behavior:

  1. Deployed Loki via Helm chart grafana/loki --version 4.7.0; the values.yaml used is shown below.
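
     For reference, the deployment command was presumably along these lines (release name and the "grafana" Helm repo alias are assumptions, not stated in the report):

       helm repo add grafana https://grafana.github.io/helm-charts
       helm upgrade --install loki grafana/loki --version 4.7.0 -f values.yaml

     values.yaml: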
---
global:
  image:
    # -- Overrides the Docker registry globally for all images
    registry: null
  # -- Overrides the priorityClassName for all pods
  priorityClassName: null
  # -- configures cluster domain ("cluster.local" by default)
  clusterDomain: "cluster.local"
  # -- configures DNS service name
  dnsService: "kube-dns"
  # -- configures DNS service namespace
  dnsNamespace: "kube-system"

# -- Overrides the chart's name
nameOverride: null

# -- Overrides the chart's computed fullname
fullnameOverride: null

# -- Image pull secrets for Docker images
imagePullSecrets: []

kubectlImage:
  # -- The Docker registry
  registry: docker.io
  # -- Docker image repository
  repository: bitnami/kubectl
  # -- Overrides the image tag whose default is the chart's appVersion
  tag: null
  # -- Docker image pull policy
  pullPolicy: IfNotPresent

loki:
  # Configures the readiness probe for all of the Loki pods
  readinessProbe:
    httpGet:
      path: /ready
      port: http-metrics
    initialDelaySeconds: 30
    timeoutSeconds: 1
  image:
    # -- The Docker registry
    registry: docker.io
    # -- Docker image repository
    repository: grafana/loki
    # -- Overrides the image tag whose default is the chart's appVersion
    # TODO: needed for 3rd target backend functionality
    # revert to null or latest once this behavior is released
    tag: null
    # -- Docker image pull policy
    pullPolicy: IfNotPresent
  # -- Common annotations for all pods
  podAnnotations: {}
  # -- Common labels for all pods
  podLabels: {}
  # -- The number of old ReplicaSets to retain to allow rollback
  revisionHistoryLimit: 10
  # -- The SecurityContext for Loki pods
  podSecurityContext:
    fsGroup: 10001
    runAsGroup: 10001
    runAsNonRoot: true
    runAsUser: 10001
  # -- The SecurityContext for Loki containers
  containerSecurityContext:
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    allowPrivilegeEscalation: false
  # -- Whether enableServiceLinks should be enabled. Defaults to enabled
  enableServiceLinks: true
  # -- Specify an existing secret containing loki configuration. If non-empty, overrides `loki.config`
  existingSecretForConfig: ""
  # -- Config file contents for Loki
  # @default -- See values.yaml
  config: |
    {{- if .Values.enterprise.enabled}}
    {{- tpl .Values.enterprise.config . }}
    {{- else }}
    auth_enabled: {{ .Values.loki.auth_enabled }}
    {{- end }}

    {{- with .Values.loki.server }}
    server:
      {{- toYaml . | nindent 2}}
    {{- end}}

    memberlist:
      join_members:
        - {{ include "loki.memberlist" . }}
        {{- with .Values.migrate.fromDistributed }}
        {{- if .enabled }}
        - {{ .memberlistService }}
        {{- end }}
        {{- end }}

    {{- with .Values.loki.ingester }}
    ingester:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

    {{- if .Values.loki.commonConfig}}
    common:
    {{- toYaml .Values.loki.commonConfig | nindent 2}}
      storage:
      {{- include "loki.commonStorageConfig" . | nindent 4}}
    {{- end}}

    {{- with .Values.loki.limits_config }}
    limits_config:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

    runtime_config:
      file: /etc/loki/runtime-config/runtime-config.yaml

    {{- with .Values.loki.memcached.chunk_cache }}
    {{- if and .enabled (or .host .addresses) }}
    chunk_store_config:
      chunk_cache_config:
        memcached:
          batch_size: {{ .batch_size }}
          parallelism: {{ .parallelism }}
        memcached_client:
          {{- if .host }}
          host: {{ .host }}
          {{- end }}
          {{- if .addresses }}
          addresses: {{ .addresses }}
          {{- end }}
          service: {{ .service }}
    {{- end }}
    {{- end }}

    {{- if .Values.loki.schemaConfig}}
    schema_config:
    {{- toYaml .Values.loki.schemaConfig | nindent 2}}
    {{- else }}
    schema_config:
      configs:
        - from: 2022-01-11
          store: boltdb-shipper
          object_store: {{ .Values.loki.storage.type }}
          schema: v12
          index:
            prefix: loki_index_
            period: 24h
    {{- end }}

    {{ include "loki.rulerConfig" . }}

    table_manager:
      retention_deletes_enabled: false
      retention_period: 0

    {{- with .Values.loki.memcached.results_cache }}
    query_range:
      align_queries_with_step: true
      {{- if and .enabled (or .host .addresses) }}
      cache_results: {{ .enabled }}
      results_cache:
        cache:
          default_validity: {{ .default_validity }}
          memcached_client:
            {{- if .host }}
            host: {{ .host }}
            {{- end }}
            {{- if .addresses }}
            addresses: {{ .addresses }}
            {{- end }}
            service: {{ .service }}
            timeout: {{ .timeout }}
      {{- end }}
    {{- end }}

    {{- with .Values.loki.storage_config }}
    storage_config:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

    {{- with .Values.loki.query_scheduler }}
    query_scheduler:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

    {{- with .Values.loki.compactor }}
    compactor:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

    {{- with .Values.loki.analytics }}
    analytics:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

    {{- with .Values.loki.querier }}
    querier:
      {{- tpl (. | toYaml) $ | nindent 4 }}
    {{- end }}

  # Should authentication be enabled
  auth_enabled: false
  # -- Tenants list to be created in the nginx htpasswd file, with name and password keys
  tenants: []
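  # For example (illustrative values only, not from this deployment):
  # tenants:
  #   - name: tenant-a
  #     password: changeme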

  # -- Check https://grafana.com/docs/loki/latest/configuration/#server for more info on the server configuration.
  server:
    http_listen_port: 3100
    grpc_listen_port: 9095

  # -- Limits config
  limits_config:
    enforce_metric_name: false
    reject_old_samples: true
    reject_old_samples_max_age: 168h
    max_cache_freshness_per_query: 10m
    split_queries_by_interval: 15m

  # -- Provides a reloadable runtime configuration file for some specific configuration
  runtimeConfig: {}

  # -- Check https://grafana.com/docs/loki/latest/configuration/#common_config for more info on how to provide a common configuration
  commonConfig:
    path_prefix: /var/loki
    replication_factor: 2
    compactor_address: '{{ include "loki.compactorAddress" . }}'

  # -- Storage config. Providing this will automatically populate all necessary storage configs in the templated config.
  storage:
    bucketNames:
      chunks: chunks
      ruler: ruler
      admin: admin
    type: azure
    # s3:
    #   s3: null
    #   endpoint: null
    #   region: null
    #   secretAccessKey: null
    #   accessKeyId: null
    #   s3ForcePathStyle: false
    #   insecure: false
    #   http_config: {}
    # gcs:
    #   chunkBufferSize: 0
    #   requestTimeout: "0s"
    #   enableHttp2: true
    azure:
      accountName: lokistoragedev
      accountKey: 123456
    # filesystem:
    #   chunks_directory: /var/loki/chunks
    #   rules_directory: /var/loki/rules

  # -- Configure memcached as an external cache for chunk and results cache. Disabled by default
  # must enable and specify a host for each cache you would like to use.
  memcached:
    chunk_cache:
      enabled: false
      host: ""
      service: "memcached-client"
      batch_size: 256
      parallelism: 10
    results_cache:
      enabled: false
      host: ""
      service: "memcached-client"
      timeout: "500ms"
      default_validity: "12h"

  # -- Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas
  schemaConfig: {}
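  # For example, the equivalent of the templated default above would be:
  # schemaConfig:
  #   configs:
  #     - from: 2022-01-11
  #       store: boltdb-shipper
  #       object_store: azure
  #       schema: v12
  #       index:
  #         prefix: loki_index_
  #         period: 24h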

  # -- Check https://grafana.com/docs/loki/latest/configuration/#ruler for more info on configuring ruler
  rulerConfig: {}

  # -- Structured loki configuration, takes precedence over `loki.config`, `loki.schemaConfig`, `loki.storageConfig`
  structuredConfig: {}

  # -- Additional query scheduler config
  query_scheduler: {}

  # -- Additional storage config
  storage_config:
    hedging:
      at: "250ms"
      max_per_second: 20
      up_to: 3

  # --  Optional compactor configuration
  compactor: {}

  # --  Optional analytics configuration
  analytics: {}

  # --  Optional querier configuration
  querier: {}

  # --  Optional ingester configuration
  ingester: {}

enterprise:
  # Enable enterprise features, license must be provided
  enabled: false

  # Default version of GEL to deploy
  version: v1.6.1

  # -- Optional name of the GEL cluster, otherwise will use .Release.Name
  # The cluster name must match what is in your GEL license
  cluster_name: null

  # -- Grafana Enterprise Logs license
  # In order to use Grafana Enterprise Logs features, you will need to provide
  # the contents of your Grafana Enterprise Logs license, either by providing the
  # contents of the license.jwt, or the name Kubernetes Secret that contains your
  # license.jwt.
  # To set the license contents, use the flag `--set-file 'license.contents=./license.jwt'`
  license:
    contents: "NOTAVALIDLICENSE"

  # -- Set to true when providing an external license
  useExternalLicense: false

  # -- Name of external license secret to use
  externalLicenseName: null

  # -- Name of the external config secret to use
  externalConfigName: ""

  # -- If enabled, the correct admin_client storage will be configured. If disabled while running enterprise,
  # make sure auth is set to `type: trust`, or that `auth_enabled` is set to `false`.
  adminApi:
    enabled: true

  # enterprise specific sections of the config.yaml file
  config: |
    {{- if .Values.enterprise.adminApi.enabled }}
    {{- if or .Values.minio.enabled (eq .Values.loki.storage.type "s3") (eq .Values.loki.storage.type "gcs") (eq .Values.loki.storage.type "azure") }}
    admin_client:
      storage:
        s3:
          bucket_name: {{ .Values.loki.storage.bucketNames.admin }}
    {{- end }}
    {{- end }}
    auth:
      type: {{ .Values.enterprise.adminApi.enabled | ternary "enterprise" "trust" }}
    auth_enabled: {{ .Values.loki.auth_enabled }}
    cluster_name: {{ include "loki.clusterName" . }}
    license:
      path: /etc/loki/license/license.jwt

  image:
    # -- The Docker registry
    registry: docker.io
    # -- Docker image repository
    repository: grafana/enterprise-logs
    # -- Docker image tag
    # TODO: needed for 3rd target backend functionality
    # revert to null or latest once this behavior is released
    tag: main-96f32b9f
    # -- Docker image pull policy
    pullPolicy: IfNotPresent

  adminToken:
    # -- Alternative name for admin token secret, needed by tokengen and provisioner jobs
    secret: null
    # -- Additional namespace to also create the token in. Useful if your Grafana instance
    # is in a different namespace
    additionalNamespaces: []

  # -- Alternative name of the secret to store token for the canary
  canarySecret: null

  # -- Configuration for `tokengen` target
  tokengen:
    # -- Whether the job should be part of the deployment
    enabled: true
    # -- Comma-separated list of Loki modules to load for tokengen
    targetModule: "tokengen"
    # -- Additional CLI arguments for the `tokengen` target
    extraArgs: []
    # -- Additional Kubernetes environment
    env: []
    # -- Additional labels for the `tokengen` Job
    labels: {}
    # -- Additional annotations for the `tokengen` Job
    annotations: {}
    # -- Tolerations for tokengen Job
    tolerations: []
    # -- Additional volumes for Pods
    extraVolumes: []
    # -- Additional volume mounts for Pods
    extraVolumeMounts: []
    # -- Run containers as user `enterprise-logs(uid=10001)`
    securityContext:
      runAsNonRoot: true
      runAsGroup: 10001
      runAsUser: 10001
      fsGroup: 10001
    # -- Environment variables from secrets or configmaps to add to the tokengen pods
    extraEnvFrom: []
    # -- The name of the PriorityClass for tokengen Pods
    priorityClassName: ""

  # -- Configuration for `provisioner` target
  provisioner:
    # -- Whether the job should be part of the deployment
    enabled: true
    # -- Name of the secret to store provisioned tokens in
    provisionedSecretPrefix: null
    # -- Additional tenants to be created. Each tenant will get a read and write policy
    # and an associated token. Each tenant must have a name and a namespace for the secret containing
    # the token to be created in. For example
    # additionalTenants:
    #   - name: loki
    #     secretNamespace: grafana
    additionalTenants: []
    # -- Additional Kubernetes environment
    env: []
    # -- Additional labels for the `provisioner` Job
    labels: {}
    # -- Additional annotations for the `provisioner` Job
    annotations: {}
    # -- The name of the PriorityClass for provisioner Job
    priorityClassName: null
    # -- Run containers as user `enterprise-logs(uid=10001)`
    securityContext:
      runAsNonRoot: true
      runAsGroup: 10001
      runAsUser: 10001
      fsGroup: 10001
    # -- Provisioner image to Utilize
    image:
      # -- The Docker registry
      registry: docker.io
      # -- Docker image repository
      repository: grafana/enterprise-logs-provisioner
      # -- Overrides the image tag whose default is the chart's appVersion
      tag: null
      # -- Docker image pull policy
      pullPolicy: IfNotPresent
    # -- Volume mounts to add to the provisioner pods
    extraVolumeMounts: []

# -- Options that may be necessary when performing a migration from another helm chart
migrate:
  # -- When migrating from a distributed chart like loki-distributed or enterprise-logs
  fromDistributed:
    # -- Set to true if migrating from a distributed helm chart
    enabled: false
    # -- If migrating from a distributed service, provide the distributed deployment's
    # memberlist service DNS so the new deployment can join its ring.
    memberlistService: ""
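    # For example (hypothetical service DNS name):
    # memberlistService: "loki-distributed-memberlist.monitoring.svc.cluster.local"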

serviceAccount:
  # -- Specifies whether a ServiceAccount should be created
  create: true
  # -- The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name: null
  # -- Image pull secrets for the service account
  imagePullSecrets: []
  # -- Annotations for the service account
  annotations: {}
  # -- Labels for the service account
  labels: {}
  # -- Set this toggle to false to opt out of automounting API credentials for the service account
  automountServiceAccountToken: true

# RBAC configuration
rbac:
  # -- If pspEnabled true, a PodSecurityPolicy is created for K8s that use psp.
  pspEnabled: false
  # -- For OpenShift set pspEnabled to 'false' and sccEnabled to 'true' to use the SecurityContextConstraints.
  sccEnabled: false

# -- Section for configuring optional Helm test
test:
  enabled: false
  # -- Address of the prometheus server to query for the test
  prometheusAddress: "http://prometheus:9090"
  # -- Timeout for the test before failing
  timeout: 1m
  # -- Additional labels for the test pods
  labels: {}
  # -- Additional annotations for test pods
  annotations: {}
  # -- Image to use for loki canary
  image:
    # -- The Docker registry
    registry: docker.io
    # -- Docker image repository
    repository: grafana/loki-helm-test
    # -- Overrides the image tag whose default is the chart's appVersion
    tag: null
    # -- Docker image pull policy
    pullPolicy: IfNotPresent

# Monitoring section determines which monitoring features to enable
monitoring:
  # Dashboards for monitoring Loki
  dashboards:
    # -- If enabled, create configmap with dashboards for monitoring Loki
    enabled: true
    # -- Alternative namespace to create dashboards ConfigMap in
    namespace: null
    # -- Additional annotations for the dashboards ConfigMap
    annotations: {}
    # -- Labels for the dashboards ConfigMap
    labels:
      grafana_dashboard: "1"

  # Recording rules for monitoring Loki, required for some dashboards
  rules:
    # -- If enabled, create PrometheusRule resource with Loki recording rules
    enabled: true
    # -- Include alerting rules
    alerting: true
    # -- Alternative namespace to create PrometheusRule resources in
    namespace: null
    # -- Additional annotations for the rules PrometheusRule resource
    annotations: {}
    # -- Additional labels for the rules PrometheusRule resource
    labels: {}
    # -- Additional groups to add to the rules file
    additionalGroups: []
    # - name: additional-loki-rules
    #   rules:
    #     - record: job:loki_request_duration_seconds_bucket:sum_rate
    #       expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job)
    #     - record: job_route:loki_request_duration_seconds_bucket:sum_rate
    #       expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, job, route)
    #     - record: node_namespace_pod_container:container_cpu_usage_seconds_total:sum_rate
    #       expr: sum(rate(container_cpu_usage_seconds_total[1m])) by (node, namespace, pod, container)

  # ServiceMonitor configuration
  serviceMonitor:
    # -- If enabled, ServiceMonitor resources for Prometheus Operator are created
    enabled: true
    # -- Namespace selector for ServiceMonitor resources
    namespaceSelector: {}
    # -- ServiceMonitor annotations
    annotations: {}
    # -- Additional ServiceMonitor labels
    labels: {}
    # -- ServiceMonitor scrape interval
    # Default is 15s because included recording rules use a 1m rate, and scrape interval needs to be at
    # least 1/4 rate interval.
    interval: 15s
    # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s)
    scrapeTimeout: null
    # -- ServiceMonitor relabel configs to apply to samples before scraping
    # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
    relabelings: []
    # -- ServiceMonitor will use http by default, but you can pick https as well
    scheme: http
    # -- ServiceMonitor will use these tlsConfig settings to make the health check requests
    tlsConfig: null
    # -- If defined, will create a MetricsInstance for the Grafana Agent Operator.
    metricsInstance:
      # -- If enabled, MetricsInstance resources for Grafana Agent Operator are created
      enabled: true
      # -- MetricsInstance annotations
      annotations: {}
      # -- Additional MetricsInstance labels
      labels: {}
      # -- If defined, a MetricsInstance will be created to remote-write metrics.
      remoteWrite: null

  # Self monitoring determines whether Loki should scrape its own logs.
  # This feature currently relies on the Grafana Agent Operator being installed,
  # which is installed by default using the grafana-agent-operator sub-chart.
  # It will create custom resources for GrafanaAgent, LogsInstance, and PodLogs to configure
  # scrape configs that scrape its own logs with the labels expected by the included dashboards.
  selfMonitoring:
    enabled: true

    # -- Tenant to use for self monitoring
    tenant:
      # -- Name of the tenant
      name: "self-monitoring"
      # -- Namespace to create additional tenant token secret in. Useful if your Grafana instance
      # is in a separate namespace. Token will still be created in the canary namespace.
      secretNamespace: "{{ .Release.Namespace }}"

    # Grafana Agent configuration
    grafanaAgent:
      # -- Controls whether to install the Grafana Agent Operator and its CRDs.
      # Note that helm will not install CRDs if this flag is enabled during an upgrade.
      # In that case install the CRDs manually from https://github.com/grafana/agent/tree/main/production/operator/crds
      installOperator: true
      # -- Grafana Agent annotations
      annotations: {}
      # -- Additional Grafana Agent labels
      labels: {}
      # -- Enable the config read api on port 8080 of the agent
      enableConfigReadAPI: false

    # PodLogs configuration
    podLogs:
      # -- PodLogs annotations
      annotations: {}
      # -- Additional PodLogs labels
      labels: {}
      # -- PodLogs relabel configs to apply to samples before scraping
      # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
      relabelings: []

    # LogsInstance configuration
    logsInstance:
      # -- LogsInstance annotations
      annotations: {}
      # -- Additional LogsInstance labels
      labels: {}
      # -- Additional clients for remote write
      clients: null

  # The Loki canary pushes logs to and queries from this loki installation to test
  # that it's working correctly
  lokiCanary:
    enabled: true
    # -- Additional annotations for the `loki-canary` Daemonset
    annotations: {}
    # -- Additional CLI arguments for the `loki-canary` command
    extraArgs: []
    # -- Environment variables to add to the canary pods
    extraEnv: []
    # -- Environment variables from secrets or configmaps to add to the canary pods
    extraEnvFrom: []
    # -- Resource requests and limits for the canary
    resources: {}
    # -- Node selector for canary pods
    nodeSelector: {}
    # -- Tolerations for canary pods
    tolerations: []
    # -- Image to use for loki canary
    image:
      # -- The Docker registry
      registry: docker.io
      # -- Docker image repository
      repository: grafana/loki-canary
      # -- Overrides the image tag whose default is the chart's appVersion
      tag: null
      # -- Docker image pull policy
      pullPolicy: IfNotPresent

# Configuration for the write pod(s)
write:
  # -- Number of replicas for the write
  replicas: 2
  image:
    # -- The Docker registry for the write image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the write image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the write image. Overrides `loki.image.tag`
    tag: null
  # -- The name of the PriorityClass for write pods
  priorityClassName: null
  # -- Annotations for write pods
  podAnnotations: {}
  # -- Additional labels for each `write` pod
  podLabels: {}
  # -- Additional selector labels for each `write` pod
  selectorLabels: {}
  # -- Labels for ingester service
  serviceLabels: {}
  # -- Comma-separated list of Loki modules to load for the write
  targetModule: "write"
  # -- Additional CLI args for the write
  extraArgs: []
  # -- Environment variables to add to the write pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the write pods
  extraEnvFrom: []
  # -- Lifecycle for the write container
  lifecycle: {}
  # -- Init containers to add to the write pods
  initContainers: []
  # -- Volume mounts to add to the write pods
  extraVolumeMounts: []
  # -- Volumes to add to the write pods
  extraVolumes: []
  # -- Resource requests and limits for the write
  resources: {}
  # -- Grace period to allow the write to shut down before it is killed. Especially for the ingester,
  # this must be increased. It must be long enough for writes to shut down gracefully, flushing or
  # transferring all data and successfully leaving the member ring on shutdown.
  terminationGracePeriodSeconds: 300
  # -- Affinity for write pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.writeSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
    nodeAffinity:
      # requiredDuringSchedulingIgnoredDuringExecution: The scheduler can't schedule the Pod unless the rule is met. This functions like nodeSelector, but with a more expressive syntax.
      # preferredDuringSchedulingIgnoredDuringExecution: The scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the Pod.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          # Targeting only User nodepool(s)
          - key: kubernetes.azure.com/mode
            operator: In
            values:
            - user
  # -- Node selector for write pods
  nodeSelector: {}
  # -- Tolerations for write pods
  tolerations: []
  persistence:
    # -- Enable StatefulSetAutoDeletePVC feature
    enableStatefulSetAutoDeletePVC: false
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: azurefile-loki
    # -- Selector for persistent disk
    selector: null

# Configuration for the table-manager
tableManager:
  # -- Specifies whether the table-manager should be enabled
  enabled: false
  image:
    # -- The Docker registry for the table-manager image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the table-manager image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the table-manager image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for table-manager pods
  priorityClassName: null
  # -- Labels for table-manager pods
  podLabels: {}
  # -- Annotations for table-manager pods
  podAnnotations: {}
  # -- Labels for table-manager service
  serviceLabels: {}
  # -- Additional CLI args for the table-manager
  extraArgs: []
  # -- Environment variables to add to the table-manager pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the table-manager pods
  extraEnvFrom: []
  # -- Volume mounts to add to the table-manager pods
  extraVolumeMounts: []
  # -- Volumes to add to the table-manager pods
  extraVolumes: []
  # -- Resource requests and limits for the table-manager
  resources: {}
  # -- Containers to add to the table-manager pods
  extraContainers: []
  # -- Grace period to allow the table-manager to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for table-manager pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.tableManagerSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.tableManagerSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
    nodeAffinity:
      # requiredDuringSchedulingIgnoredDuringExecution: The scheduler can't schedule the Pod unless the rule is met. This functions like nodeSelector, but with a more expressive syntax.
      # preferredDuringSchedulingIgnoredDuringExecution: The scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the Pod.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          # Targeting only User nodepool(s)
          - key: kubernetes.azure.com/mode
            operator: In
            values:
            - user
  # -- Node selector for table-manager pods
  nodeSelector: {}
  # -- Tolerations for table-manager pods
  tolerations: []

# Configuration for the read pod(s)
read:
  # -- Number of replicas for the read
  replicas: 2
  autoscaling:
    # -- Enable autoscaling for the read; this is only used if `queryIndex.enabled: true`
    enabled: false
    # -- Minimum autoscaling replicas for the read
    minReplicas: 1
    # -- Maximum autoscaling replicas for the read
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the read
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the read
    targetMemoryUtilizationPercentage:
  image:
    # -- The Docker registry for the read image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the read image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the read image. Overrides `loki.image.tag`
    tag: null
  # -- The name of the PriorityClass for read pods
  priorityClassName: null
  # -- Annotations for read pods
  podAnnotations: {}
  # -- Additional labels for each `read` pod
  podLabels: {}
  # -- Additional selector labels for each `read` pod
  selectorLabels: {}
  # -- Labels for read service
  serviceLabels: {}
  # -- Comma-separated list of Loki modules to load for the read
  targetModule: "read"
  # -- Whether to run the two-target simple scalable mode (read, write) or the
  # three-target mode (read, write, backend). "Legacy" refers to the two-target mode,
  # so true runs two targets and false runs three.
  legacyReadTarget: true
  # -- Additional CLI args for the read
  extraArgs: []
  # -- Environment variables to add to the read pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the read pods
  extraEnvFrom: []
  # -- Lifecycle for the read container
  lifecycle: {}
  # -- Volume mounts to add to the read pods
  extraVolumeMounts: []
  # -- Volumes to add to the read pods
  extraVolumes: []
  # -- Resource requests and limits for the read
  resources: {}
  # -- Grace period to allow the read to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for read pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.readSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
    nodeAffinity:
      # requiredDuringSchedulingIgnoredDuringExecution: The scheduler can't schedule the Pod unless the rule is met. This functions like nodeSelector, but with a more expressive syntax.
      # preferredDuringSchedulingIgnoredDuringExecution: The scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the Pod.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          # Targeting only User nodepool(s)
          - key: kubernetes.azure.com/mode
            operator: In
            values:
            - user
  # -- Node selector for read pods
  nodeSelector: {}
  # -- Tolerations for read pods
  tolerations: []
  persistence:
    # -- Enable StatefulSetAutoDeletePVC feature
    enableStatefulSetAutoDeletePVC: true
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: azurefile-loki
    # -- Selector for persistent disk
    selector: null

# Configuration for the backend pod(s)
backend:
  # -- Number of replicas for the backend
  replicas: 2
  image:
    # -- The Docker registry for the backend image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the backend image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the backend image. Overrides `loki.image.tag`
    tag: null
  # -- The name of the PriorityClass for backend pods
  priorityClassName: null
  # -- Annotations for backend pods
  podAnnotations: {}
  # -- Additional labels for each `backend` pod
  podLabels: {}
  # -- Additional selector labels for each `backend` pod
  selectorLabels: {}
  # -- Labels for backend service
  serviceLabels: {}
  # -- Comma-separated list of Loki modules to load for the backend
  targetModule: "backend"
  # -- Additional CLI args for the backend
  extraArgs: []
  # -- Environment variables to add to the backend pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the backend pods
  extraEnvFrom: []
  # -- Init containers to add to the backend pods
  initContainers: []
  # -- Volume mounts to add to the backend pods
  extraVolumeMounts: []
  # -- Volumes to add to the backend pods
  extraVolumes: []
  # -- Resource requests and limits for the backend
  resources: {}
  # -- Grace period to allow the backend to shut down before it is killed. Especially for the ingester,
  # this must be increased. It must be long enough for backends to shut down gracefully, flushing or
  # transferring all data and successfully leaving the member ring on shutdown.
  terminationGracePeriodSeconds: 300
  # -- Affinity for backend pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.backendSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
    nodeAffinity:
      # requiredDuringSchedulingIgnoredDuringExecution: The scheduler can't schedule the Pod unless the rule is met. This functions like nodeSelector, but with a more expressive syntax.
      # preferredDuringSchedulingIgnoredDuringExecution: The scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the Pod.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          # Targeting only User nodepool(s)
          - key: kubernetes.azure.com/mode
            operator: In
            values:
            - user
  # -- Node selector for backend pods
  nodeSelector: {}
  # -- Tolerations for backend pods
  tolerations: []
  persistence:
    # -- Enable StatefulSetAutoDeletePVC feature
    enableStatefulSetAutoDeletePVC: true
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: azurefile-loki
    # -- Selector for persistent disk
    selector: null

# Configuration for the single binary node(s)
singleBinary:
  # -- Number of replicas for the single binary
  replicas: 0
  autoscaling:
    # -- Enable autoscaling; this is only used if `queryIndex.enabled: true`
    enabled: false
    # -- Minimum autoscaling replicas for the single binary
    minReplicas: 1
    # -- Maximum autoscaling replicas for the single binary
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the single binary
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the single binary
    targetMemoryUtilizationPercentage:
  image:
    # -- The Docker registry for the single binary image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the single binary image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the single binary image. Overrides `loki.image.tag`
    tag: null
  # -- The name of the PriorityClass for single binary pods
  priorityClassName: null
  # -- Annotations for single binary pods
  podAnnotations: {}
  # -- Additional labels for each `single binary` pod
  podLabels: {}
  # -- Additional selector labels for each `single binary` pod
  selectorLabels: {}
  # -- Comma-separated list of Loki modules to load for the single binary
  targetModule: "all"
  # -- Additional CLI args for the single binary
  extraArgs: []
  # -- Environment variables to add to the single binary pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the single binary pods
  extraEnvFrom: []
  # -- Init containers to add to the single binary pods
  initContainers: []
  # -- Volume mounts to add to the single binary pods
  extraVolumeMounts: []
  # -- Volumes to add to the single binary pods
  extraVolumes: []
  # -- Resource requests and limits for the single binary
  resources: {}
  # -- Grace period to allow the single binary to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for single binary pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.singleBinarySelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
    nodeAffinity:
      # requiredDuringSchedulingIgnoredDuringExecution: The scheduler can't schedule the Pod unless the rule is met. This functions like nodeSelector, but with a more expressive syntax.
      # preferredDuringSchedulingIgnoredDuringExecution: The scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the Pod.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          # Targeting only User nodepool(s)
          - key: kubernetes.azure.com/mode
            operator: In
            values:
            - user
  # -- Node selector for single binary pods
  nodeSelector: {}
  # -- Tolerations for single binary pods
  tolerations: []
  persistence:
    # -- Enable StatefulSetAutoDeletePVC feature
    enableStatefulSetAutoDeletePVC: true
    # -- Enable persistent disk
    enabled: true
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: azurefile-loki
    # -- Selector for persistent disk
    selector: null

# Use either this ingress or the gateway, but not both at once.
# If you enable this, make sure to disable the gateway.
# You'll need to supply authn configuration for your ingress controller.
ingress:
  enabled: false
  ingressClassName: ""
  annotations: {}
  #    nginx.ingress.kubernetes.io/auth-type: basic
  #    nginx.ingress.kubernetes.io/auth-secret: loki-distributed-basic-auth
  #    nginx.ingress.kubernetes.io/auth-secret-type: auth-map
  #    nginx.ingress.kubernetes.io/configuration-snippet: |
  #      proxy_set_header X-Scope-OrgID $remote_user;
  paths:
    write:
      - /api/prom/push
      - /loki/api/v1/push
    read:
      - /api/prom/tail
      - /loki/api/v1/tail
      - /loki/api
      - /api/prom/rules
      - /loki/api/v1/rules
      - /prometheus/api/v1/rules
      - /prometheus/api/v1/alerts
    singleBinary:
      - /api/prom/push
      - /loki/api/v1/push
      - /api/prom/tail
      - /loki/api/v1/tail
      - /loki/api
      - /api/prom/rules
      - /loki/api/v1/rules
      - /prometheus/api/v1/rules
      - /prometheus/api/v1/alerts

  hosts:
    - loki.example.com
  tls: []
#    - hosts:
#       - loki.example.com
#      secretName: loki-distributed-tls

# Configuration for the memberlist service
memberlist:
  service:
    publishNotReadyAddresses: false

# Configuration for the gateway
gateway:
  # -- Specifies whether the gateway should be enabled
  enabled: true
  # -- Number of replicas for the gateway
  replicas: 1
  # -- Enable logging of 2xx and 3xx HTTP requests
  verboseLogging: true
  autoscaling:
    # -- Enable autoscaling for the gateway
    enabled: false
    # -- Minimum autoscaling replicas for the gateway
    minReplicas: 1
    # -- Maximum autoscaling replicas for the gateway
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the gateway
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the gateway
    targetMemoryUtilizationPercentage:
  # -- See `kubectl explain deployment.spec.strategy` for more
  # -- ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  deploymentStrategy:
    type: RollingUpdate
  image:
    # -- The Docker registry for the gateway image
    registry: docker.io
    # -- The gateway image repository
    repository: nginxinc/nginx-unprivileged
    # -- The gateway image tag
    tag: 1.19-alpine
    # -- The gateway image pull policy
    pullPolicy: IfNotPresent
  # -- The name of the PriorityClass for gateway pods
  priorityClassName: null
  # -- Annotations for gateway pods
  podAnnotations: {}
  # -- Additional labels for gateway pods
  podLabels: {}
  # -- Additional CLI args for the gateway
  extraArgs: []
  # -- Environment variables to add to the gateway pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the gateway pods
  extraEnvFrom: []
  # -- Lifecycle for the gateway container
  lifecycle: {}
  # -- Volumes to add to the gateway pods
  extraVolumes: []
  # -- Volume mounts to add to the gateway pods
  extraVolumeMounts: []
  # -- The SecurityContext for gateway containers
  podSecurityContext:
    fsGroup: 101
    runAsGroup: 101
    runAsNonRoot: true
    runAsUser: 101
  # -- The SecurityContext for gateway containers
  containerSecurityContext:
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    allowPrivilegeEscalation: false
  # -- Resource requests and limits for the gateway
  resources: {}
  # -- Grace period to allow the gateway to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for gateway pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.gatewaySelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
    nodeAffinity:
      # requiredDuringSchedulingIgnoredDuringExecution: The scheduler can't schedule the Pod unless the rule is met. This functions like nodeSelector, but with a more expressive syntax.
      # preferredDuringSchedulingIgnoredDuringExecution: The scheduler tries to find a node that meets the rule. If a matching node is not available, the scheduler still schedules the Pod.
      requiredDuringSchedulingIgnoredDuringExecution:
        nodeSelectorTerms:
        - matchExpressions:
          # Targeting only User nodepool(s)
          - key: kubernetes.azure.com/mode
            operator: In
            values:
            - user
  # -- Node selector for gateway pods
  nodeSelector: {}
  # -- Tolerations for gateway pods
  tolerations: []
  # Gateway service configuration
  service:
    # -- Port of the gateway service
    port: 80
    # -- Type of the gateway service
    type: ClusterIP
    # -- ClusterIP of the gateway service
    clusterIP: null
    # -- (int) Node port if service type is NodePort
    nodePort: null
    # -- Load balancer IP address if service type is LoadBalancer
    loadBalancerIP: null
    # -- Annotations for the gateway service
    annotations: {}
    # -- Labels for gateway service
    labels: {}
  # Gateway ingress configuration
  ingress:
    # -- Specifies whether an ingress for the gateway should be created
    enabled: false
    # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18
    ingressClassName: ""
    # -- Annotations for the gateway ingress
    annotations: {}
    # -- Hosts configuration for the gateway ingress
    hosts:
      - host: gateway.loki.example.com
        paths:
          - path: /
            # -- pathType (e.g. ImplementationSpecific, Prefix, .. etc.) might also be required by some Ingress Controllers
            # pathType: Prefix
    # -- TLS configuration for the gateway ingress
    tls:
      - secretName: loki-gateway-tls
        hosts:
          - gateway.loki.example.com
  # Basic auth configuration
  basicAuth:
    # -- Enables basic authentication for the gateway
    enabled: false
    # -- The basic auth username for the gateway
    username: null
    # -- The basic auth password for the gateway
    password: null
    # -- Uses the specified users from the `loki.tenants` list to create the htpasswd file
    # if `loki.tenants` is not set, the `gateway.basicAuth.username` and `gateway.basicAuth.password` are used
    # The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes
    # high CPU load.
    htpasswd: >-
      {{ if .Values.loki.tenants }}
        {{- range $t := .Values.loki.tenants }}
      {{ htpasswd (required "All tenants must have a 'name' set" $t.name) (required "All tenants must have a 'password' set" $t.password) }}
        {{- end }}
      {{ else }}
      {{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }}
      {{ end }}
    # -- Existing basic auth secret to use. Must contain '.htpasswd'
    existingSecret: null
  # Configures the readiness probe for the gateway
  readinessProbe:
    httpGet:
      path: /
      port: http
    initialDelaySeconds: 15
    timeoutSeconds: 1
  nginxConfig:
    # -- NGINX log format
    logFormat: |-
      main '$remote_addr - $remote_user [$time_local]  $status '
              '"$request" $body_bytes_sent "$http_referer" '
              '"$http_user_agent" "$http_x_forwarded_for"';
    # -- Allows appending custom configuration to the server block
    serverSnippet: ""
    # -- Allows appending custom configuration to the http block, passed through the `tpl` function to allow templating
    httpSnippet: >-
      {{ if .Values.loki.tenants }}proxy_set_header X-Scope-OrgID $remote_user;{{ end }}
    # -- Override Read URL
    customReadUrl: null
    # -- Override Write URL
    customWriteUrl: null
    # -- Override Backend URL
    customBackendUrl: null
    # -- Config file contents for Nginx. Passed through the `tpl` function to allow templating
    # @default -- See values.yaml
    file: |
      {{- include "loki.nginxFile" . | indent 2 -}}
networkPolicy:
  # -- Specifies whether Network Policies should be created
  enabled: false
  metrics:
    # -- Specifies the Pods which are allowed to access the metrics port.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespaces which are allowed to access the metrics port
    namespaceSelector: {}
    # -- Specifies specific network CIDRs which are allowed to access the metrics port.
    # In case you use namespaceSelector, you also have to specify your kubelet networks here.
    # The metrics ports are also used for probes.
    cidrs: []
  ingress:
    # -- Specifies the Pods which are allowed to access the http port.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespaces which are allowed to access the http port
    namespaceSelector: {}
  alertmanager:
    # -- Specify the alertmanager port used for alerting
    port: 9093
    # -- Specifies the alertmanager Pods.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespace the alertmanager is running in
    namespaceSelector: {}
  externalStorage:
    # -- Specify the port used for external storage, e.g. AWS S3
    ports: []
    # -- Specifies specific network CIDRs you want to limit access to
    cidrs: []
  discovery:
    # -- (int) Specify the port used for discovery
    port: null
    # -- Specifies the Pods labels used for discovery.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespace the discovery Pods are running in
    namespaceSelector: {}

tracing:
  jaegerAgentHost: ""

# -------------------------------------
# Configuration for `minio` child chart
# -------------------------------------
minio:
  enabled: false
  replicas: 1
  # Minio requires 2 to 16 drives for erasure code (drivesPerNode * replicas)
  # https://docs.min.io/docs/minio-erasure-code-quickstart-guide
  # Since we only have 1 replica, that means 2 drives must be used.
  drivesPerNode: 2
  rootUser: enterprise-logs
  rootPassword: supersecret
  buckets:
    - name: chunks
      policy: none
      purge: false
    - name: ruler
      policy: none
      purge: false
    - name: admin
      policy: none
      purge: false
  persistence:
    size: 5Gi
  resources:
    requests:
      cpu: 100m
      memory: 128Mi

# Create extra manifests via values. Would be passed through `tpl` for templating
extraObjects: []
# - apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: loki-alerting-rules
#   data:
#     loki-alerting-rules.yaml: |-
#       groups:
#         - name: example
#           rules:
#           - alert: example
#             expr: |
#               sum(count_over_time({app="loki"} |~ "error")) > 0
#             for: 3m
#             labels:
#               severity: warning
#               category: logs
#             annotations:
#               message: "loki has encountered errors"

Expected behavior
No errors like those mentioned above.

Environment:

  • Infrastructure: AKS 1.23.12
  • Deployment tool: helm

From #5216, there seem to be suggestions to use Azure Files + NFS, which we tried, but it appears only Blob is supported? When mounting NFS Azure file shares, we see constant errors about Loki trying to reach the Blob endpoint, like those below. Note that in the linked issue the individual is using object_store: filesystem, whereas our Helm deployment has object_store: azure. Also, from here there is no mention of Azure; we are not sure of Loki's stance regarding Azure Storage support.

level=error ts=2023-03-01T02:42:41.287382368Z caller=index_set.go:285 table-name=loki_index_19417 msg="sync failed, retrying it" err="Get \"https://lokistoragedev.blob.core.windows.net/chunks?comp=list&delimiter=%2F&prefix=index%2Floki_index_19417%2F&restype=container&timeout=31\": dial tcp: lookup lokistoragedev.blob.core.windows.net on 10.0.0.10:53: no such host"

level=error ts=2023-03-01T02:42:41.287415669Z caller=index_set.go:103 table-name=loki_index_19417 msg="failed to initialize table loki_index_19417, cleaning it up" err="Get \"https://lokistoragedev.blob.core.windows.net/chunks?comp=list&delimiter=%2F&prefix=index%2Floki_index_19417%2F&restype=container&timeout=31\": dial tcp: lookup lokistoragedev.blob.core.windows.net on 10.0.0.10:53: no such host"

level=error ts=2023-03-01T02:42:41.287444169Z caller=table.go:324 table-name=loki_index_19417 msg="failed to init user index set " err="Get \"https://lokistoragedev.blob.core.windows.net/chunks?comp=list&delimiter=%2F&prefix=index%2Floki_index_19417%2F&restype=container&timeout=31\": dial tcp: lookup lokistoragedev.blob.core.windows.net on 10.0.0.10:53: no such host"

level=error ts=2023-03-01T02:42:41.287466469Z caller=table.go:342 table-name=loki_index_19417 org_id=fake msg="index set  has some problem, cleaning it up" err="Get \"https://lokistoragedev.blob.core.windows.net/chunks?comp=list&delimiter=%2F&prefix=index%2Floki_index_19417%2F&restype=container&timeout=31\": dial tcp: lookup lokistoragedev.blob.core.windows.net on 10.0.0.10:53: no such host"
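
For comparison, the filesystem-based setup referenced in #5216 would correspond roughly to values like the following (an illustrative sketch based on the chart's commented-out filesystem block above, not a configuration we ran):

    loki:
      storage:
        type: filesystem
        filesystem:
          chunks_directory: /var/loki/chunks
          rules_directory: /var/loki/rules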

thiDucTran (Author) commented:

Closing as this is no longer relevant to our needs; we switched to the loki-distributed chart. See https://grafana.com/docs/loki/latest/fundamentals/architecture/deployment-modes/#microservices-mode and https://github.com/grafana/helm-charts/tree/main/charts/loki-distributed

This is our values.yaml for grafana/loki-distributed --version 0.69.8:
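
A command along these lines would apply it (release name and the "grafana" Helm repo alias are assumptions):

    helm upgrade --install loki grafana/loki-distributed --version 0.69.8 -f values.yaml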

global:
  image:
    # -- Overrides the Docker registry globally for all images
    registry: null
  # -- Overrides the priorityClassName for all pods
  priorityClassName: null
  # -- configures cluster domain ("cluster.local" by default)
  clusterDomain: "cluster.local"
  # -- configures DNS service name
  dnsService: "kube-dns"
  # -- configures DNS service namespace
  dnsNamespace: "kube-system"

# -- Overrides the chart's name
nameOverride: null

# -- Overrides the chart's computed fullname
fullnameOverride: null

# -- Image pull secrets for Docker images
imagePullSecrets: []

loki:
  # -- If set, these annotations are added to all of the Kubernetes controllers
  # (Deployments, StatefulSets, etc) that this chart launches. Use this to
  # implement something like the "Wave" controller or another controller that
  # is monitoring top level deployment resources.
  annotations: {}
  # Configures the readiness probe for all of the Loki pods
  readinessProbe:
    httpGet:
      path: /ready
      port: http
    initialDelaySeconds: 30
    timeoutSeconds: 1
  livenessProbe:
    httpGet:
      path: /ready
      port: http
    initialDelaySeconds: 300
  image:
    # -- The Docker registry
    registry: docker.io
    # -- Docker image repository
    repository: grafana/loki
    # -- Overrides the image tag whose default is the chart's appVersion
    tag: null
    # -- Docker image pull policy
    pullPolicy: IfNotPresent
  # -- Common labels for all pods
  podLabels: {}
  # -- Common annotations for all pods
  podAnnotations: {}
  # -- Common command override for all pods (except gateway)
  command: null
  # -- The number of old ReplicaSets to retain to allow rollback
  revisionHistoryLimit: 10
  # -- The SecurityContext for Loki pods
  podSecurityContext:
    fsGroup: 10001
    runAsGroup: 10001
    runAsNonRoot: true
    runAsUser: 10001
  # -- The SecurityContext for Loki containers
  containerSecurityContext:
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    allowPrivilegeEscalation: false
  # -- Specify an existing secret containing loki configuration. If non-empty, overrides `loki.config`
  existingSecretForConfig: ""
  # -- Adds the appProtocol field to the memberlist service. This allows memberlist to work with istio protocol selection. Ex: "http" or "tcp"
  appProtocol: ""
  # -- Common annotations for all loki services
  serviceAnnotations: {}
  # -- Config file contents for Loki
  # @default -- See values.yaml
  config: |
    auth_enabled: false

    server:
      http_listen_port: 3100

    common:
      compactor_address: http://{{ include "loki.compactorFullname" . }}:3100

    distributor:
      ring:
        kvstore:
          store: memberlist

    memberlist:
      join_members:
        - {{ include "loki.fullname" . }}-memberlist

    ingester:
      lifecycler:
        ring:
          kvstore:
            store: memberlist
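          # replication_factor 1 writes each stream to a single ingester, so losing that ingester can lose data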
          replication_factor: 1
      chunk_idle_period: 30m
      chunk_block_size: 262144
      chunk_encoding: snappy
      chunk_retain_period: 1m
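      # a value of 0 disables chunk transfers between ingesters; ingesters flush on shutdown and the WAL below covers crash recovery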
      max_transfer_retries: 0
      wal:
        dir: /var/loki/wal

    limits_config:
      enforce_metric_name: false
      reject_old_samples: true
      reject_old_samples_max_age: 168h
      max_cache_freshness_per_query: 10m
      split_queries_by_interval: 15m

    {{- if .Values.loki.schemaConfig}}
    schema_config:
    {{- toYaml .Values.loki.schemaConfig | nindent 2}}
    {{- end}}
    {{- if .Values.loki.storageConfig}}
    storage_config:
    {{- if .Values.indexGateway.enabled}}
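    {{- /* when the index-gateway is enabled, point the boltdb-shipper index client at it over gRPC */}}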
    {{- $indexGatewayClient := dict "server_address" (printf "dns:///%s:9095" (include "loki.indexGatewayFullname" .)) }}
    {{- $_ := set .Values.loki.storageConfig.boltdb_shipper "index_gateway_client" $indexGatewayClient }}
    {{- end}}
    {{- toYaml .Values.loki.storageConfig | nindent 2}}
    {{- if .Values.memcachedIndexQueries.enabled }}
      index_queries_cache_config:
        memcached_client:
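          # the dnssrv+ prefix discovers memcached pod addresses via DNS SRV lookups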
          addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedIndexQueriesFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
          consistent_hash: true
    {{- end}}
    {{- end}}

    runtime_config:
      file: /var/{{ include "loki.name" . }}-runtime/runtime.yaml

    chunk_store_config:
      max_look_back_period: 0s
      {{- if .Values.memcachedChunks.enabled }}
      chunk_cache_config:
        embedded_cache:
          enabled: false
        memcached_client:
          consistent_hash: true
          addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedChunksFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
      {{- end }}
      {{- if .Values.memcachedIndexWrites.enabled }}
      write_dedupe_cache_config:
        memcached_client:
          consistent_hash: true
          addresses: dnssrv+_memcached-client._tcp.{{ include "loki.memcachedIndexWritesFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}
      {{- end }}

    table_manager:
      retention_deletes_enabled: false
      retention_period: 0s

    query_range:
      align_queries_with_step: true
      max_retries: 5
      cache_results: true
      results_cache:
        cache:
          {{- if .Values.memcachedFrontend.enabled }}
          memcached_client:
            host: {{ include "loki.memcachedFrontendFullname" . }}
            consistent_hash: true
          {{- else }}
          embedded_cache:
            enabled: true
            ttl: 24h
          {{- end }}

    frontend_worker:
      {{- if .Values.queryScheduler.enabled }}
      scheduler_address: {{ include "loki.querySchedulerFullname" . }}:9095
      {{- else }}
      frontend_address: {{ include "loki.queryFrontendFullname" . }}:9095
      {{- end }}

    frontend:
      log_queries_longer_than: 5s
      compress_responses: true
      {{- if .Values.queryScheduler.enabled }}
      scheduler_address: {{ include "loki.querySchedulerFullname" . }}:9095
      {{- end }}
      tail_proxy_url: http://{{ include "loki.querierFullname" . }}:3100

    compactor:
      shared_store: filesystem

    ruler:
      storage:
        type: local
        local:
          directory: /etc/loki/rules
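          # rule files defined under ruler.directories below are mounted here by the chart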
      ring:
        kvstore:
          store: memberlist
      rule_path: /tmp/loki/scratch
      alertmanager_url: https://alertmanager.xx
      external_url: https://alertmanager.xx

  # -- Check https://grafana.com/docs/loki/latest/configuration/#schema_config for more info on how to configure schemas
  schemaConfig:
    configs:
    - from: 2020-09-07
      store: boltdb-shipper
      object_store: filesystem
      schema: v11
      index:
        prefix: loki_index_
        period: 24h

  # -- Check https://grafana.com/docs/loki/latest/configuration/#storage_config for more info on how to configure storages
  storageConfig:
    boltdb_shipper:
      shared_store: filesystem
      active_index_directory: /var/loki/index
      cache_location: /var/loki/cache
      cache_ttl: 168h
    filesystem:
      directory: /var/loki/chunks
  # -- Uncomment to configure each storage individually
  #   azure: {}
  #   gcs: {}
  #   s3: {}
  #   boltdb: {}

  # -- Structured loki configuration, takes precedence over `loki.config`, `loki.schemaConfig`, `loki.storageConfig`
  structuredConfig:
    compactor:
      shared_store: azure
    ingester:
      max_transfer_retries: 0
      chunk_idle_period: 1h
      chunk_target_size: 1572864
      max_chunk_age: 1h
    schema_config:
      configs:
      - from: "2020-12-11"
        index:
          period: 24h
          prefix: index_
        object_store: azure
        schema: v11
        store: boltdb-shipper
    storage_config:
      azure:
        account_key: ""
        account_name: lokistoragedev
        container_name: logs
        request_timeout: 0
        use_managed_identity: false
      boltdb_shipper:
        active_index_directory: /var/loki/data/loki/boltdb-shipper-active
        cache_location: /var/loki/data/loki/boltdb-shipper-cache
        cache_ttl: 24h
        shared_store: azure
      filesystem:
        directory: /var/loki/data/loki/chunks
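  # Note: because structuredConfig takes precedence, the azure-backed schema_config
  # and storage_config above are what take effect, overriding the filesystem-based
  # loki.schemaConfig and loki.storageConfig defaults earlier in this file.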

# -- Provides a reloadable runtime configuration file for some specific configuration
runtimeConfig: {}

serviceAccount:
  # -- Specifies whether a ServiceAccount should be created
  create: true
  # -- The name of the ServiceAccount to use.
  # If not set and create is true, a name is generated using the fullname template
  name: null
  # -- Image pull secrets for the service account
  imagePullSecrets: []
  # -- Annotations for the service account
  annotations: {}
  # -- Set this toggle to false to opt out of automounting API credentials for the service account
  automountServiceAccountToken: true

# RBAC configuration
rbac:
  # -- If pspEnabled true, a PodSecurityPolicy is created for K8s that use psp.
  pspEnabled: false
  # -- For OpenShift set pspEnabled to 'false' and sccEnabled to 'true' to use the SecurityContextConstraints.
  sccEnabled: false

# ServiceMonitor configuration
serviceMonitor:
  # -- If enabled, ServiceMonitor resources for Prometheus Operator are created
  enabled: false
  # -- Alternative namespace for ServiceMonitor resources
  namespace: null
  # -- Namespace selector for ServiceMonitor resources
  namespaceSelector: {}
  # -- ServiceMonitor annotations
  annotations: {}
  # -- Additional ServiceMonitor labels
  labels: {}
  # -- ServiceMonitor scrape interval
  interval: null
  # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s)
  scrapeTimeout: null
  # -- ServiceMonitor relabel configs to apply to samples before scraping
  # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
  relabelings: []
  # -- ServiceMonitor metric relabel configs to apply to samples before ingestion
  # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
  metricRelabelings: []
  # -- ServiceMonitor will add labels from the service to the Prometheus metric
  # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitorspec
  targetLabels: []
  # -- ServiceMonitor will use http by default, but you can pick https as well
  scheme: http
  # -- ServiceMonitor will use these tlsConfig settings to make the health check requests
  tlsConfig: null

# Rules for the Prometheus Operator
prometheusRule:
  # -- If enabled, a PrometheusRule resource for Prometheus Operator is created
  enabled: false
  # -- Alternative namespace for the PrometheusRule resource
  namespace: null
  # -- PrometheusRule annotations
  annotations: {}
  # -- Additional PrometheusRule labels
  labels: {}
  # -- Contents of Prometheus rules file
  groups:
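   # recording rules for Loki request-duration quantiles, rates, and averages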
   - name: loki_rules
     rules:
       - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m]))
           by (le, cluster, job))
         record: cluster_job:loki_request_duration_seconds:99quantile
       - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m]))
           by (le, cluster, job))
         record: cluster_job:loki_request_duration_seconds:50quantile
       - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job) / sum(rate(loki_request_duration_seconds_count[1m]))
           by (cluster, job)
         record: cluster_job:loki_request_duration_seconds:avg
       - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job)
         record: cluster_job:loki_request_duration_seconds_bucket:sum_rate
       - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job)
         record: cluster_job:loki_request_duration_seconds_sum:sum_rate
       - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job)
         record: cluster_job:loki_request_duration_seconds_count:sum_rate
       - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m]))
           by (le, cluster, job, route))
         record: cluster_job_route:loki_request_duration_seconds:99quantile
       - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m]))
           by (le, cluster, job, route))
         record: cluster_job_route:loki_request_duration_seconds:50quantile
       - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route)
           / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route)
         record: cluster_job_route:loki_request_duration_seconds:avg
       - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, job,
           route)
         record: cluster_job_route:loki_request_duration_seconds_bucket:sum_rate
       - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, job, route)
         record: cluster_job_route:loki_request_duration_seconds_sum:sum_rate
       - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, job, route)
         record: cluster_job_route:loki_request_duration_seconds_count:sum_rate
       - expr: histogram_quantile(0.99, sum(rate(loki_request_duration_seconds_bucket[1m]))
           by (le, cluster, namespace, job, route))
         record: cluster_namespace_job_route:loki_request_duration_seconds:99quantile
       - expr: histogram_quantile(0.50, sum(rate(loki_request_duration_seconds_bucket[1m]))
           by (le, cluster, namespace, job, route))
         record: cluster_namespace_job_route:loki_request_duration_seconds:50quantile
       - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace,
           job, route) / sum(rate(loki_request_duration_seconds_count[1m])) by (cluster,
           namespace, job, route)
         record: cluster_namespace_job_route:loki_request_duration_seconds:avg
       - expr: sum(rate(loki_request_duration_seconds_bucket[1m])) by (le, cluster, namespace,
           job, route)
         record: cluster_namespace_job_route:loki_request_duration_seconds_bucket:sum_rate
       - expr: sum(rate(loki_request_duration_seconds_sum[1m])) by (cluster, namespace,
           job, route)
         record: cluster_namespace_job_route:loki_request_duration_seconds_sum:sum_rate
       - expr: sum(rate(loki_request_duration_seconds_count[1m])) by (cluster, namespace,
           job, route)
         record: cluster_namespace_job_route:loki_request_duration_seconds_count:sum_rate

# Configuration for the ingester
ingester:
  # -- Kind of deployment [StatefulSet/Deployment]
  kind: StatefulSet
  # -- Number of replicas for the ingester
  replicas: 1
  autoscaling:
    # -- Enable autoscaling for the ingester
    enabled: false
    # -- Minimum autoscaling replicas for the ingester
    minReplicas: 1
    # -- Maximum autoscaling replicas for the ingester
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the ingester
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the ingester
    targetMemoryUtilizationPercentage:
  image:
    # -- The Docker registry for the ingester image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the ingester image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the ingester image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for ingester pods
  priorityClassName: null
  # -- Labels for ingester pods
  podLabels: {}
  # -- Annotations for ingester pods
  podAnnotations: {}
  # -- Labels for ingester service
  serviceLabels: {}
  # -- Additional CLI args for the ingester
  extraArgs: []
  # -- Environment variables to add to the ingester pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the ingester pods
  extraEnvFrom: []
  # -- Volume mounts to add to the ingester pods
  extraVolumeMounts: []
  # -- Volumes to add to the ingester pods
  extraVolumes: []
  # -- Resource requests and limits for the ingester
  resources: {}
  # -- Containers to add to the ingester pods
  extraContainers: []
  # -- Init containers to add to the ingester pods
  initContainers: []
  # -- Grace period to allow the ingester to shut down before it is killed. Especially for the ingester,
  # this must be increased: it must be long enough for ingesters to shut down gracefully, flushing/transferring
  # all data and successfully leaving the member ring on shutdown.
  terminationGracePeriodSeconds: 300
  # -- topologySpread for ingester pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Defaults to allow a skew of no more than 1 node per AZ
  topologySpreadConstraints: |
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          {{- include "loki.ingesterSelectorLabels" . | nindent 6 }}
  # -- Affinity for ingester pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.ingesterSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.ingesterSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for ingester pods
  nodeSelector: {}
  # -- Tolerations for ingester pods
  tolerations: []
  # -- readiness probe settings for ingester pods. If empty, use `loki.readinessProbe`
  readinessProbe: {}
  # -- liveness probe settings for ingester pods. If empty use `loki.livenessProbe`
  livenessProbe: {}
  persistence:
    # -- Enable creating PVCs which is required when using boltdb-shipper
    enabled: false
    # -- Use emptyDir with ramdisk for storage. **Please note that all data in ingester will be lost on pod restart**
    inMemory: false
    # -- Size of persistent or memory disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null
    # -- Annotations for ingester PVCs
    annotations: {}
  # -- Adds the appProtocol field to the ingester service. This allows ingester to work with istio protocol selection.
  appProtocol:
    # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
    grpc: ""

# Configuration for the distributor
distributor:
  # -- Number of replicas for the distributor
  replicas: 1
  autoscaling:
    # -- Enable autoscaling for the distributor
    enabled: false
    # -- Minimum autoscaling replicas for the distributor
    minReplicas: 1
    # -- Maximum autoscaling replicas for the distributor
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the distributor
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the distributor
    targetMemoryUtilizationPercentage:
  image:
    # -- The Docker registry for the distributor image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the distributor image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the distributor image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for distributor pods
  priorityClassName: null
  # -- Labels for distributor pods
  podLabels: {}
  # -- Annotations for distributor pods
  podAnnotations: {}
  # -- Labels for distributor service
  serviceLabels: {}
  # -- Additional CLI args for the distributor
  extraArgs: []
  # -- Environment variables to add to the distributor pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the distributor pods
  extraEnvFrom: []
  # -- Volume mounts to add to the distributor pods
  extraVolumeMounts: []
  # -- Volumes to add to the distributor pods
  extraVolumes: []
  # -- Resource requests and limits for the distributor
  resources: {}
  # -- Containers to add to the distributor pods
  extraContainers: []
  # -- Grace period to allow the distributor to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for distributor pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.distributorSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.distributorSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for distributor pods
  nodeSelector: {}
  # -- Tolerations for distributor pods
  tolerations: []
  # -- Adds the appProtocol field to the distributor service. This allows distributor to work with istio protocol selection.
  appProtocol:
    # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
    grpc: ""

# Configuration for the querier
querier:
  # -- Number of replicas for the querier
  replicas: 1
  autoscaling:
    # -- Enable autoscaling for the querier; this is only used if `indexGateway.enabled: true`
    enabled: false
    # -- Minimum autoscaling replicas for the querier
    minReplicas: 1
    # -- Maximum autoscaling replicas for the querier
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the querier
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the querier
    targetMemoryUtilizationPercentage:
  image:
    # -- The Docker registry for the querier image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the querier image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the querier image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for querier pods
  priorityClassName: null
  # -- Labels for querier pods
  podLabels: {}
  # -- Annotations for querier pods
  podAnnotations: {}
  # -- Labels for querier service
  serviceLabels: {}
  # -- Additional CLI args for the querier
  extraArgs: []
  # -- Environment variables to add to the querier pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the querier pods
  extraEnvFrom: []
  # -- Volume mounts to add to the querier pods
  extraVolumeMounts: []
  # -- Volumes to add to the querier pods
  extraVolumes: []
  # -- Resource requests and limits for the querier
  resources: {}
  # -- Containers to add to the querier pods
  extraContainers: []
  # -- Init containers to add to the querier pods
  initContainers: []
  # -- Grace period to allow the querier to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- topologySpread for querier pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Defaults to allow a skew of no more than 1 node per AZ
  topologySpreadConstraints: |
    - maxSkew: 1
      topologyKey: kubernetes.io/hostname
      whenUnsatisfiable: ScheduleAnyway
      labelSelector:
        matchLabels:
          {{- include "loki.querierSelectorLabels" . | nindent 6 }}
  # -- Affinity for querier pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: {}
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for querier pods
  nodeSelector: {}
  # -- Tolerations for querier pods
  tolerations: []
  # -- DNSConfig for querier pods
  dnsConfig: {}
  persistence:
    # -- Enable creating PVCs for the querier cache
    enabled: false
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null
    # -- Annotations for querier PVCs
    annotations: {}
  # -- Adds the appProtocol field to the querier service. This allows querier to work with istio protocol selection.
  appProtocol:
    # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
    grpc: ""

# Configuration for the query-frontend
queryFrontend:
  # -- Number of replicas for the query-frontend
  replicas: 1
  autoscaling:
    # -- Enable autoscaling for the query-frontend
    enabled: false
    # -- Minimum autoscaling replicas for the query-frontend
    minReplicas: 1
    # -- Maximum autoscaling replicas for the query-frontend
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the query-frontend
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the query-frontend
    targetMemoryUtilizationPercentage:
  image:
    # -- The Docker registry for the query-frontend image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the query-frontend image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the query-frontend image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for query-frontend pods
  priorityClassName: null
  # -- Labels for query-frontend pods
  podLabels: {}
  # -- Annotations for query-frontend pods
  podAnnotations: {}
  # -- Labels for query-frontend service
  serviceLabels: {}
  # -- Additional CLI args for the query-frontend
  extraArgs: []
  # -- Environment variables to add to the query-frontend pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the query-frontend pods
  extraEnvFrom: []
  # -- Volume mounts to add to the query-frontend pods
  extraVolumeMounts: []
  # -- Volumes to add to the query-frontend pods
  extraVolumes: []
  # -- Resource requests and limits for the query-frontend
  resources: {}
  # -- Containers to add to the query-frontend pods
  extraContainers: []
  # -- Grace period to allow the query-frontend to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for query-frontend pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.queryFrontendSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.queryFrontendSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for query-frontend pods
  nodeSelector: {}
  # -- Tolerations for query-frontend pods
  tolerations: []
  # -- Adds the appProtocol field to the queryFrontend service. This allows queryFrontend to work with istio protocol selection.
  appProtocol:
    # -- Set the optional grpc service protocol. Ex: "grpc", "http2" or "https"
    grpc: ""

# Configuration for the query-scheduler
queryScheduler:
  # -- Specifies whether the query-scheduler should be decoupled from the query-frontend
  enabled: false
  # -- Number of replicas for the query-scheduler.
  # It should be lower than `-querier.max-concurrent` to avoid generating back-pressure in queriers;
  # it's also recommended that this value evenly divides the latter
  replicas: 2
  image:
    # -- The Docker registry for the query-scheduler image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the query-scheduler image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the query-scheduler image. Overrides `loki.image.tag`
    tag: null
  # -- The name of the PriorityClass for query-scheduler pods
  priorityClassName: null
  # -- Labels for query-scheduler pods
  podLabels: {}
  # -- Annotations for query-scheduler pods
  podAnnotations: {}
  # -- Labels for query-scheduler service
  serviceLabels: {}
  # -- Additional CLI args for the query-scheduler
  extraArgs: []
  # -- Environment variables to add to the query-scheduler pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the query-scheduler pods
  extraEnvFrom: []
  # -- Volume mounts to add to the query-scheduler pods
  extraVolumeMounts: []
  # -- Volumes to add to the query-scheduler pods
  extraVolumes: []
  # -- Resource requests and limits for the query-scheduler
  resources: {}
  # -- Containers to add to the query-scheduler pods
  extraContainers: []
  # -- Grace period to allow the query-scheduler to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for query-scheduler pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.querySchedulerSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.querySchedulerSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: 1
  # -- Node selector for query-scheduler pods
  nodeSelector: {}
  # -- Tolerations for query-scheduler pods
  tolerations: []

# Configuration for the table-manager
tableManager:
  # -- Specifies whether the table-manager should be enabled
  enabled: false
  image:
    # -- The Docker registry for the table-manager image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the table-manager image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the table-manager image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for table-manager pods
  priorityClassName: null
  # -- Labels for table-manager pods
  podLabels: {}
  # -- Annotations for table-manager pods
  podAnnotations: {}
  # -- Labels for table-manager service
  serviceLabels: {}
  # -- Additional CLI args for the table-manager
  extraArgs: []
  # -- Environment variables to add to the table-manager pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the table-manager pods
  extraEnvFrom: []
  # -- Volume mounts to add to the table-manager pods
  extraVolumeMounts: []
  # -- Volumes to add to the table-manager pods
  extraVolumes: []
  # -- Resource requests and limits for the table-manager
  resources: {}
  # -- Containers to add to the table-manager pods
  extraContainers: []
  # -- Grace period to allow the table-manager to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for table-manager pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.tableManagerSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.tableManagerSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Node selector for table-manager pods
  nodeSelector: {}
  # -- Tolerations for table-manager pods
  tolerations: []

# Use either this ingress or the gateway, but not both at once.
# If you enable this, make sure to disable the gateway.
# You'll need to supply authn configuration for your ingress controller.
ingress:
  enabled: false
#  ingressClassName: nginx
  annotations: {}
#    nginx.ingress.kubernetes.io/auth-type: basic
#    nginx.ingress.kubernetes.io/auth-secret: loki-distributed-basic-auth
#    nginx.ingress.kubernetes.io/auth-secret-type: auth-map
#    nginx.ingress.kubernetes.io/configuration-snippet: |
#      proxy_set_header X-Scope-OrgID $remote_user;
  paths:
    distributor:
      - /api/prom/push
      - /loki/api/v1/push
    querier:
      - /api/prom/tail
      - /loki/api/v1/tail
    query-frontend:
      - /loki/api
    ruler:
      - /api/prom/rules
      - /loki/api/v1/rules
      - /prometheus/api/v1/rules
      - /prometheus/api/v1/alerts
  hosts:
    - loki.example.com
  # tls:
  #   - secretName: loki-distributed-tls
  #     hosts:
  #       - loki.example.com

# Configuration for the gateway
gateway:
  # -- Specifies whether the gateway should be enabled
  enabled: true
  # -- Number of replicas for the gateway
  replicas: 1
  # -- Enable logging of 2xx and 3xx HTTP requests
  verboseLogging: true
  autoscaling:
    # -- Enable autoscaling for the gateway
    enabled: false
    # -- Minimum autoscaling replicas for the gateway
    minReplicas: 1
    # -- Maximum autoscaling replicas for the gateway
    maxReplicas: 3
    # -- Target CPU utilisation percentage for the gateway
    targetCPUUtilizationPercentage: 60
    # -- Target memory utilisation percentage for the gateway
    targetMemoryUtilizationPercentage:
  # -- See `kubectl explain deployment.spec.strategy` for more,
  # ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  deploymentStrategy:
    type: RollingUpdate
  image:
    # -- The Docker registry for the gateway image
    registry: docker.io
    # -- The gateway image repository
    repository: nginxinc/nginx-unprivileged
    # -- The gateway image tag
    tag: 1.20.2-alpine
    # -- The gateway image pull policy
    pullPolicy: IfNotPresent
  # -- The name of the PriorityClass for gateway pods
  priorityClassName: null
  # -- Labels for gateway pods
  podLabels: {}
  # -- Annotations for gateway pods
  podAnnotations: {}
  # -- Additional CLI args for the gateway
  extraArgs: []
  # -- Environment variables to add to the gateway pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the gateway pods
  extraEnvFrom: []
  # -- Volumes to add to the gateway pods
  extraVolumes: []
  # -- Volume mounts to add to the gateway pods
  extraVolumeMounts: []
  # -- The SecurityContext for gateway pods
  podSecurityContext:
    fsGroup: 101
    runAsGroup: 101
    runAsNonRoot: true
    runAsUser: 101
  # -- The SecurityContext for gateway containers
  containerSecurityContext:
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    allowPrivilegeEscalation: false
  # -- Resource requests and limits for the gateway
  resources: {}
  # -- Containers to add to the gateway pods
  extraContainers: []
  # -- Grace period to allow the gateway to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for gateway pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.gatewaySelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.gatewaySelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for gateway pods
  nodeSelector: {}
  # -- Tolerations for gateway pods
  tolerations: []
  # -- DNSConfig for gateway pods
  dnsConfig: {}
  # Gateway service configuration
  service:
    # -- Port of the gateway service
    port: 80
    # -- Type of the gateway service
    type: ClusterIP
    # -- ClusterIP of the gateway service
    clusterIP: null
    # -- Node port if service type is NodePort
    nodePort: null
    # -- Load balancer IP address if service type is LoadBalancer
    loadBalancerIP: null
    # -- Load balancer allow traffic from CIDR list if service type is LoadBalancer
    loadBalancerSourceRanges: []
    # -- Set appProtocol for the service
    appProtocol: null
    # -- Annotations for the gateway service
    annotations: {}
    # -- Labels for gateway service
    labels: {}
  # Gateway ingress configuration
  ingress:
    # -- Specifies whether an ingress for the gateway should be created
    enabled: false
    # -- Ingress Class Name. MAY be required for Kubernetes versions >= 1.18
    # For example: `ingressClassName: nginx`
    ingressClassName: ''

    # -- Annotations for the gateway ingress
    annotations: {}
    # -- Hosts configuration for the gateway ingress
    hosts:
      - host: gateway.loki.example.com
        paths:
          - path: /
            # -- pathType (e.g. ImplementationSpecific, Prefix, etc.) might also be required by some Ingress Controllers
            # pathType: Prefix
    # -- TLS configuration for the gateway ingress
    tls: []
    # tls:
    #   - secretName: loki-gateway-tls
    #     hosts:
    #       - gateway.loki.example.com

  # Basic auth configuration
  basicAuth:
    # -- Enables basic authentication for the gateway
    enabled: false
    # -- The basic auth username for the gateway
    username: null
    # -- The basic auth password for the gateway
    password: null
    # -- Uses the specified username and password to compute a htpasswd using Sprig's `htpasswd` function.
    # The value is templated using `tpl`. Override this to use a custom htpasswd, e.g. in case the default causes
    # high CPU load.
    # @default -- See values.yaml
    htpasswd: >-
      {{ htpasswd (required "'gateway.basicAuth.username' is required" .Values.gateway.basicAuth.username) (required "'gateway.basicAuth.password' is required" .Values.gateway.basicAuth.password) }}
    # -- Existing basic auth secret to use. Must contain '.htpasswd'
    existingSecret: null
  # Configures the readiness probe for the gateway
  readinessProbe:
    httpGet:
      path: /
      port: http
    initialDelaySeconds: 15
    timeoutSeconds: 1
  livenessProbe:
    httpGet:
      path: /
      port: http
    initialDelaySeconds: 30
  nginxConfig:
    # -- NGINX log format
    # @default -- See values.yaml
    logFormat: |-
      main '$remote_addr - $remote_user [$time_local]  $status '
              '"$request" $body_bytes_sent "$http_referer" '
              '"$http_user_agent" "$http_x_forwarded_for"';
    # -- Allows appending custom configuration to the server block
    serverSnippet: ""
    # -- Allows appending custom configuration to the http block
    httpSnippet: ""
    # -- Allows overriding the DNS resolver address nginx will use.
    resolver: ""
    # -- Config file contents for Nginx. Passed through the `tpl` function to allow templating
    # @default -- See values.yaml
    file: |
      worker_processes  5;  ## Default: 1
      error_log  /dev/stderr;
      pid        /tmp/nginx.pid;
      worker_rlimit_nofile 8192;

      events {
        worker_connections  4096;  ## Default: 1024
      }

      http {
        client_body_temp_path /tmp/client_temp;
        proxy_temp_path       /tmp/proxy_temp_path;
        fastcgi_temp_path     /tmp/fastcgi_temp;
        uwsgi_temp_path       /tmp/uwsgi_temp;
        scgi_temp_path        /tmp/scgi_temp;

        proxy_http_version    1.1;

        default_type application/octet-stream;
        log_format   {{ .Values.gateway.nginxConfig.logFormat }}

        {{- if .Values.gateway.verboseLogging }}
        access_log   /dev/stderr  main;
        {{- else }}

        map $status $loggable {
          ~^[23]  0;
          default 1;
        }
        access_log   /dev/stderr  main  if=$loggable;
        {{- end }}

        sendfile     on;
        tcp_nopush   on;
        {{- if .Values.gateway.nginxConfig.resolver }}
        resolver {{ .Values.gateway.nginxConfig.resolver }};
        {{- else }}
        resolver {{ .Values.global.dnsService }}.{{ .Values.global.dnsNamespace }}.svc.{{ .Values.global.clusterDomain }};
        {{- end }}
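        # nginx only re-resolves upstream DNS at request time when proxy_pass uses a
        # variable, which is why the location blocks below use "set $..._backend"
        # together with the resolver configured above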

        {{- with .Values.gateway.nginxConfig.httpSnippet }}
        {{ . | nindent 2 }}
        {{- end }}

        server {
          listen             8080;

          {{- if .Values.gateway.basicAuth.enabled }}
          auth_basic           "Loki";
          auth_basic_user_file /etc/nginx/secrets/.htpasswd;
          {{- end }}

          location = / {
            return 200 'OK';
            auth_basic off;
            access_log off;
          }

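          # write path: push requests are proxied straight to the distributor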
          location = /api/prom/push {
            set $api_prom_push_backend http://{{ include "loki.distributorFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
            proxy_pass       $api_prom_push_backend:3100$request_uri;
            proxy_http_version 1.1;
          }

          location = /api/prom/tail {
            set $api_prom_tail_backend http://{{ include "loki.querierFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
            proxy_pass       $api_prom_tail_backend:3100$request_uri;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_http_version 1.1;
          }

          # Ruler
          location ~ /prometheus/api/v1/alerts.* {
            proxy_pass       http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
          }
          location ~ /prometheus/api/v1/rules.* {
            proxy_pass       http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
          }
          location ~ /api/prom/rules.* {
            proxy_pass       http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
          }
          location ~ /api/prom/alerts.* {
            proxy_pass       http://{{ include "loki.rulerFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }}:3100$request_uri;
          }

          location ~ /api/prom/.* {
            set $api_prom_backend http://{{ include "loki.queryFrontendFullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
            proxy_pass       $api_prom_backend:3100$request_uri;
            proxy_http_version 1.1;
          }

          location = /loki/api/v1/push {
            set $loki_api_v1_push_backend http://{{ include "loki.distributorFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
            proxy_pass       $loki_api_v1_push_backend:3100$request_uri;
            proxy_http_version 1.1;
          }

          location = /loki/api/v1/tail {
            set $loki_api_v1_tail_backend http://{{ include "loki.querierFullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
            proxy_pass       $loki_api_v1_tail_backend:3100$request_uri;
            proxy_set_header Upgrade $http_upgrade;
            proxy_set_header Connection "upgrade";
            proxy_http_version 1.1;
          }

          location ~ /loki/api/.* {
            set $loki_api_backend http://{{ include "loki.queryFrontendFullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.global.clusterDomain }};
            proxy_pass       $loki_api_backend:3100$request_uri;
            proxy_http_version 1.1;
          }

          {{- with .Values.gateway.nginxConfig.serverSnippet }}
          {{ . | nindent 4 }}
          {{- end }}
        }
      }

# Configuration for the compactor
compactor:
  # -- Specifies whether compactor should be enabled
  enabled: false
  image:
    # -- The Docker registry for the compactor image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the compactor image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the compactor image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for compactor pods
  priorityClassName: null
  # -- Labels for compactor pods
  podLabels: {}
  # -- Annotations for compactor pods
  podAnnotations: {}
  # -- Specify the compactor affinity
  affinity: {}
  # -- Labels for compactor service
  serviceLabels: {}
  # -- Additional CLI args for the compactor
  extraArgs: []
  # -- Environment variables to add to the compactor pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the compactor pods
  extraEnvFrom: []
  # -- Volume mounts to add to the compactor pods
  extraVolumeMounts: []
  # -- Volumes to add to the compactor pods
  extraVolumes: []
  # -- Resource requests and limits for the compactor
  resources: {}
  # -- Containers to add to the compactor pods
  extraContainers: []
  # -- Init containers to add to the compactor pods
  initContainers: []
  # -- Grace period to allow the compactor to shutdown before it is killed
  terminationGracePeriodSeconds: 30
  # -- Node selector for compactor pods
  nodeSelector: {}
  # -- Tolerations for compactor pods
  tolerations: []
  persistence:
    # -- Enable creating PVCs for the compactor
    enabled: false
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null
    # -- Annotations for compactor PVCs
    annotations: {}
  serviceAccount:
    create: true
    # -- The name of the ServiceAccount to use for the compactor.
    # If not set and create is true, a name is generated by appending
    # "-compactor" to the common ServiceAccount.
    name: null
    # -- Image pull secrets for the compactor service account
    imagePullSecrets: []
    # -- Annotations for the compactor service account
    annotations: {}
    # -- Set this toggle to false to opt out of automounting API credentials for the service account
    automountServiceAccountToken: true

# Configuration for the ruler
ruler:
  # -- Specifies whether the ruler should be enabled
  enabled: false
  # -- Kind of deployment [StatefulSet/Deployment]
  kind: Deployment
  # -- Number of replicas for the ruler
  replicas: 1
  image:
    # -- The Docker registry for the ruler image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the ruler image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the ruler image. Overrides `loki.image.tag`
    tag: null
  # -- Command to execute instead of defined in Docker image
  command: null
  # -- The name of the PriorityClass for ruler pods
  priorityClassName: null
  # -- Labels for compactor pods
  podLabels: {}
  # -- Annotations for ruler pods
  podAnnotations: {}
  # -- Labels for ruler service
  serviceLabels: {}
  # -- Additional CLI args for the ruler
  extraArgs: []
  # -- Environment variables to add to the ruler pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the ruler pods
  extraEnvFrom: []
  # -- Volume mounts to add to the ruler pods
  extraVolumeMounts: []
  # -- Volumes to add to the ruler pods
  extraVolumes: []
  # -- Resource requests and limits for the ruler
  resources: {}
  # -- Containers to add to the ruler pods
  extraContainers: []
  # -- Init containers to add to the ruler pods
  initContainers: []
  # -- Grace period to allow the ruler to shutdown before it is killed
  terminationGracePeriodSeconds: 300
  # -- Affinity for ruler pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.rulerSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.rulerSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for ruler pods
  nodeSelector: {}
  # -- Tolerations for ruler pods
  tolerations: []
  # -- DNSConfig for ruler pods
  dnsConfig: {}
  persistence:
    # -- Enable creating PVCs which is required when using recording rules
    enabled: false
    # -- Size of persistent disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null
    # -- Annotations for ruler PVCs
    annotations: {}
  # -- Directories containing rules files
  directories: {}
    # tenant_foo:
    #   rules1.txt: |
    #     groups:
    #       - name: should_fire
    #         rules:
    #           - alert: HighPercentageError
    #             expr: |
    #               sum(rate({app="foo", env="production"} |= "error" [5m])) by (job)
    #                 /
    #               sum(rate({app="foo", env="production"}[5m])) by (job)
    #                 > 0.05
    #             for: 10m
    #             labels:
    #               severity: warning
    #             annotations:
    #               summary: High error rate
    #       - name: credentials_leak
    #         rules:
    #           - alert: http-credentials-leaked
    #             annotations:
    #               message: "{{ $labels.job }} is leaking http basic auth credentials."
    #             expr: 'sum by (cluster, job, pod) (count_over_time({namespace="prod"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0)'
    #             for: 10m
    #             labels:
    #               severity: critical
    #   rules2.txt: |
    #     groups:
    #       - name: example
    #         rules:
    #         - alert: HighThroughputLogStreams
    #           expr: sum by(container) (rate({job=~"loki-dev/.*"}[1m])) > 1000
    #           for: 2m
    # tenant_bar:
    #   rules1.txt: |
    #     groups:
    #       - name: should_fire
    #         rules:
    #           - alert: HighPercentageError
    #             expr: |
    #               sum(rate({app="foo", env="production"} |= "error" [5m])) by (job)
    #                 /
    #               sum(rate({app="foo", env="production"}[5m])) by (job)
    #                 > 0.05
    #             for: 10m
    #             labels:
    #               severity: warning
    #             annotations:
    #               summary: High error rate
    #       - name: credentials_leak
    #         rules:
    #           - alert: http-credentials-leaked
    #             annotations:
    #               message: "{{ $labels.job }} is leaking http basic auth credentials."
    #             expr: 'sum by (cluster, job, pod) (count_over_time({namespace="prod"} |~ "http(s?)://(\\w+):(\\w+)@" [5m]) > 0)'
    #             for: 10m
    #             labels:
    #               severity: critical
    #   rules2.txt: |
    #     groups:
    #       - name: example
    #         rules:
    #         - alert: HighThroughputLogStreams
    #           expr: sum by(container) (rate({job=~"loki-dev/.*"}[1m])) > 1000
    #           for: 2m

# Configuration for the index-gateway
indexGateway:
  # -- Specifies whether the index-gateway should be enabled
  enabled: false
  # -- Number of replicas for the index-gateway
  replicas: 1
  image:
    # -- The Docker registry for the index-gateway image. Overrides `loki.image.registry`
    registry: null
    # -- Docker image repository for the index-gateway image. Overrides `loki.image.repository`
    repository: null
    # -- Docker image tag for the index-gateway image. Overrides `loki.image.tag`
    tag: null
  # -- The name of the PriorityClass for index-gateway pods
  priorityClassName: null
  # -- Labels for index-gateway pods
  podLabels: {}
  # -- Annotations for index-gateway pods
  podAnnotations: {}
  # -- Labels for index-gateway service
  serviceLabels: {}
  # -- Additional CLI args for the index-gateway
  extraArgs: []
  # -- Environment variables to add to the index-gateway pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to the index-gateway pods
  extraEnvFrom: []
  # -- Volume mounts to add to the index-gateway pods
  extraVolumeMounts: []
  # -- Volumes to add to the index-gateway pods
  extraVolumes: []
  # -- Resource requests and limits for the index-gateway
  resources: {}
  # -- Containers to add to the index-gateway pods
  extraContainers: []
  # -- Init containers to add to the index-gateway pods
  initContainers: []
  # -- Grace period to allow the index-gateway to shutdown before it is killed.
  terminationGracePeriodSeconds: 300
  # -- Affinity for index-gateway pods. Passed through `tpl` and, thus, to be configured as string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.indexGatewaySelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.indexGatewaySelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for index-gateway pods
  nodeSelector: {}
  # -- Tolerations for index-gateway pods
  tolerations: []
  persistence:
    # -- Enable creating PVCs which is required when using boltdb-shipper
    enabled: false
    # -- Use emptyDir with ramdisk for storage. **Please note that all data in indexGateway will be lost on pod restart**
    inMemory: false
    # -- Size of persistent or memory disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null
    # -- Annotations for index gateway PVCs
    annotations: {}

memcached:
  readinessProbe:
    tcpSocket:
      port: http
    initialDelaySeconds: 5
    timeoutSeconds: 1
  livenessProbe:
    tcpSocket:
      port: http
    initialDelaySeconds: 10
  image:
    # -- The Docker registry for the memcached
    registry: docker.io
    # -- Memcached Docker image repository
    repository: memcached
    # -- Memcached Docker image tag
    tag: 1.6.17-alpine
    # -- Memcached Docker image pull policy
    pullPolicy: IfNotPresent
  # -- Labels for memcached pods
  podLabels: {}
  # -- The SecurityContext for memcached pods
  podSecurityContext:
    fsGroup: 11211
    runAsGroup: 11211
    runAsNonRoot: true
    runAsUser: 11211
  # -- The SecurityContext for memcached containers
  containerSecurityContext:
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    allowPrivilegeEscalation: false
  # -- Common annotations for all memcached services
  serviceAnnotations: {}
  # -- Adds the appProtocol field to the memcached services. This allows memcached to work with istio protocol selection. Ex: "http" or "tcp"
  appProtocol: ""

memcachedExporter:
  # -- Specifies whether the Memcached Exporter should be enabled
  enabled: false
  image:
    # -- The Docker registry for the Memcached Exporter
    registry: docker.io
    # -- Memcached Exporter Docker image repository
    repository: prom/memcached-exporter
    # -- Memcached Exporter Docker image tag
    tag: v0.6.0
    # -- Memcached Exporter Docker image pull policy
    pullPolicy: IfNotPresent
  # -- Labels for memcached-exporter pods
  podLabels: {}
  # -- Memcached Exporter resource requests and limits
  resources: {}
  # -- The SecurityContext for memcachedExporter containers
  containerSecurityContext:
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
    allowPrivilegeEscalation: false

memcachedChunks:
  # -- Specifies whether the Memcached chunks cache should be enabled
  enabled: true
  # -- Number of replicas for memcached-chunks
  replicas: 1
  # -- The name of the PriorityClass for memcached-chunks pods
  priorityClassName: null
  # -- Labels for memcached-chunks pods
  podLabels: {}
  # -- Annotations for memcached-chunks pods
  podAnnotations: {}
  # -- Labels for memcached-chunks service
  serviceLabels: {}
  # -- Additional CLI args for memcached-chunks
  extraArgs:
    - -I 32m
  # -- Environment variables to add to memcached-chunks pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to memcached-chunks pods
  extraEnvFrom: []
  # -- Resource requests and limits for memcached-chunks
  resources: {}
  # -- Containers to add to the memcached-chunks pods
  extraContainers: []
  # -- Grace period to allow memcached-chunks to shut down before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for memcached-chunks pods. Passed through `tpl`, so it must be configured as a string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.memcachedChunksSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.memcachedChunksSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for memcached-chunks pods
  nodeSelector: {}
  # -- Tolerations for memcached-chunks pods
  tolerations: []
  persistence:
    # -- Enable creating PVCs which will persist cached data through restarts
    enabled: false
    # -- Size of persistent or memory disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null
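  # A hedged sizing sketch for a larger chunks cache (all numbers are
  # assumptions for illustration; -m is memcached's standard memory-limit
  # flag in megabytes):
  #   replicas: 3
  #   resources:
  #     requests:
  #       cpu: 500m
  #       memory: 2Gi
  #     limits:
  #       memory: 2Gi
  #   extraArgs:
  #     - -I 32m
  #     - -m 1536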

memcachedFrontend:
  # -- Specifies whether the Memcached frontend cache should be enabled
  enabled: true
  # -- Number of replicas for memcached-frontend
  replicas: 1
  # -- The name of the PriorityClass for memcached-frontend pods
  priorityClassName: null
  # -- Labels for memcached-frontend pods
  podLabels: {}
  # -- Annotations for memcached-frontend pods
  podAnnotations: {}
  # -- Labels for memcached-frontend service
  serviceLabels: {}
  # -- Additional CLI args for memcached-frontend
  extraArgs:
    - -I 32m
  # -- Environment variables to add to memcached-frontend pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to memcached-frontend pods
  extraEnvFrom: []
  # -- Resource requests and limits for memcached-frontend
  resources: {}
  # -- Containers to add to the memcached-frontend pods
  extraContainers: []
  # -- Grace period to allow memcached-frontend to shut down before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for memcached-frontend pods. Passed through `tpl`, so it must be configured as a string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.memcachedFrontendSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.memcachedFrontendSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: 1
  # -- Node selector for memcached-frontend pods
  nodeSelector: {}
  # -- Tolerations for memcached-frontend pods
  tolerations: []
  persistence:
    # -- Enable creating PVCs which will persist cached data through restarts
    enabled: false
    # -- Size of persistent or memory disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null

memcachedIndexQueries:
  # -- Specifies whether the Memcached index queries cache should be enabled
  enabled: true
  # -- Number of replicas for memcached-index-queries
  replicas: 1
  # -- The name of the PriorityClass for memcached-index-queries pods
  priorityClassName: null
  # -- Labels for memcached-index-queries pods
  podLabels: {}
  # -- Annotations for memcached-index-queries pods
  podAnnotations: {}
  # -- Labels for memcached-index-queries service
  serviceLabels: {}
  # -- Additional CLI args for memcached-index-queries
  extraArgs:
    - -I 32m
  # -- Environment variables to add to memcached-index-queries pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to memcached-index-queries pods
  extraEnvFrom: []
  # -- Resource requests and limits for memcached-index-queries
  resources: {}
  # -- Containers to add to the memcached-index-queries pods
  extraContainers: []
  # -- Grace period to allow memcached-index-queries to shut down before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for memcached-index-queries pods. Passed through `tpl`, so it must be configured as a string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.memcachedIndexQueriesSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.memcachedIndexQueriesSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for memcached-index-queries pods
  nodeSelector: {}
  # -- Tolerations for memcached-index-queries pods
  tolerations: []
  persistence:
    # -- Enable creating PVCs which will persist cached data through restarts
    enabled: false
    # -- Size of persistent or memory disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null

memcachedIndexWrites:
  # -- Specifies whether the Memcached index writes cache should be enabled
  enabled: false
  # -- Number of replicas for memcached-index-writes
  replicas: 1
  # -- The name of the PriorityClass for memcached-index-writes pods
  priorityClassName: null
  # -- Labels for memcached-index-writes pods
  podLabels: {}
  # -- Annotations for memcached-index-writes pods
  podAnnotations: {}
  # -- Labels for memcached-index-writes service
  serviceLabels: {}
  # -- Additional CLI args for memcached-index-writes
  extraArgs:
    - -I 32m
  # -- Environment variables to add to memcached-index-writes pods
  extraEnv: []
  # -- Environment variables from secrets or configmaps to add to memcached-index-writes pods
  extraEnvFrom: []
  # -- Resource requests and limits for memcached-index-writes
  resources: {}
  # -- Containers to add to the memcached-index-writes pods
  extraContainers: []
  # -- Grace period to allow memcached-index-writes to shut down before it is killed
  terminationGracePeriodSeconds: 30
  # -- Affinity for memcached-index-writes pods. Passed through `tpl`, so it must be configured as a string
  # @default -- Hard node and soft zone anti-affinity
  affinity: |
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              {{- include "loki.memcachedIndexWritesSelectorLabels" . | nindent 10 }}
          topologyKey: kubernetes.io/hostname
      preferredDuringSchedulingIgnoredDuringExecution:
        - weight: 100
          podAffinityTerm:
            labelSelector:
              matchLabels:
                {{- include "loki.memcachedIndexWritesSelectorLabels" . | nindent 12 }}
            topologyKey: failure-domain.beta.kubernetes.io/zone
  # -- Pod Disruption Budget maxUnavailable
  maxUnavailable: null
  # -- Node selector for memcached-index-writes pods
  nodeSelector: {}
  # -- Tolerations for memcached-index-writes pods
  tolerations: []
  persistence:
    # -- Enable creating PVCs which will persist cached data through restarts
    enabled: false
    # -- Size of persistent or memory disk
    size: 10Gi
    # -- Storage class to be used.
    # If defined, storageClassName: <storageClass>.
    # If set to "-", storageClassName: "", which disables dynamic provisioning.
    # If empty or set to null, no storageClassName spec is
    # set, choosing the default provisioner (gp2 on AWS, standard on GKE, AWS, and OpenStack).
    storageClass: null

networkPolicy:
  # -- Specifies whether Network Policies should be created
  enabled: false
  metrics:
    # -- Specifies the Pods which are allowed to access the metrics port.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespaces which are allowed to access the metrics port
    namespaceSelector: {}
    # -- Specifies specific network CIDRs which are allowed to access the metrics port.
    # If you use namespaceSelector, you must also list your kubelet networks here,
    # because the metrics ports are also used for probes.
    cidrs: []
  ingress:
    # -- Specifies the Pods which are allowed to access the http port.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespaces which are allowed to access the http port
    namespaceSelector: {}
  alertmanager:
    # -- Specify the alertmanager port used for alerting
    port: 9093
    # -- Specifies the alertmanager Pods.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespace the alertmanager is running in
    namespaceSelector: {}
  externalStorage:
    # -- Specify the ports used for external storage, e.g. AWS S3
    ports: []
    # -- Specifies specific network CIDRs you want to limit access to
    cidrs: []
  discovery:
    # -- Specify the port used for discovery
    port: null
    # -- Specifies the Pods labels used for discovery.
    # As this is cross-namespace communication, you also need the namespaceSelector.
    podSelector: {}
    # -- Specifies the namespace the discovery Pods are running in
    namespaceSelector: {}
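
# A sketch of the networkPolicy block in use (namespace name, labels, and CIDR
# are assumptions for illustration; kubernetes.io/metadata.name is the label
# Kubernetes 1.21+ sets on every namespace): allow a Prometheus in a
# "monitoring" namespace to scrape the metrics port:
#   networkPolicy:
#     enabled: true
#     metrics:
#       namespaceSelector:
#         matchLabels:
#           kubernetes.io/metadata.name: monitoring
#       podSelector:
#         matchLabels:
#           app.kubernetes.io/name: prometheus
#       cidrs:
#         - 10.0.0.0/8   # kubelet network (assumed), needed because probes use the metrics port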
