diff --git a/.travis.yml b/.travis.yml index 0d912a0ecb..f8bcec4046 100644 --- a/.travis.yml +++ b/.travis.yml @@ -7,10 +7,10 @@ before_install: - rm -f ./Gemfile.lock - gem install bundler # yq - - curl --retry 10 --retry-max-time 120 --retry-delay 5 -L --remote-name https://github.com/mikefarah/yq/releases/download/3.2.1/yq_linux_amd64 - - sudo mv yq_linux_amd64 /usr/local/bin/yq-3.2.1 - - sudo chmod +x /usr/local/bin/yq-3.2.1 - - sudo ln -s /usr/local/bin/yq-3.2.1 /usr/local/bin/yq + - curl --retry 10 --retry-max-time 120 --retry-delay 5 -L --remote-name https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 + - sudo mv yq_linux_amd64 /usr/local/bin/yq-3.4.1 + - sudo chmod +x /usr/local/bin/yq-3.4.1 + - sudo ln -s /usr/local/bin/yq-3.4.1 /usr/local/bin/yq # shellcheck - curl --retry 10 --retry-max-time 120 --retry-delay 5 -Lo- https://github.com/koalaman/shellcheck/releases/download/v0.7.1/shellcheck-v0.7.1.linux.x86_64.tar.xz | tar -xJf - - sudo cp shellcheck-v0.7.1/shellcheck /usr/local/bin && rm -rf shellcheck-v0.7.1 diff --git a/ci/tests.sh b/ci/tests.sh index f94730b706..fbdb96ae05 100755 --- a/ci/tests.sh +++ b/ci/tests.sh @@ -69,6 +69,10 @@ echo "Test helm templates generation" echo "Test upgrade script..." ./tests/upgrade_script/run.sh || (echo "Failed testing upgrade script" && exit 1) +# Test upgrade v2 script +echo "Test upgrade v2 script..." +./tests/upgrade_v2_script/run.sh || (echo "Failed testing upgrade v2 script" && exit 1) + # Test fluentd plugins test_fluentd_plugins "${VERSION}" || (echo "Failed testing fluentd plugins" && exit 1) diff --git a/deploy/docs/v2_migration_doc.md b/deploy/docs/v2_migration_doc.md index fd08234bca..d67c2f04f3 100644 --- a/deploy/docs/v2_migration_doc.md +++ b/deploy/docs/v2_migration_doc.md @@ -70,6 +70,9 @@ the exact steps for migration. - name: "config-reloader" ``` +- We've separated the fluentd image from the setup job image, so `image` has been migrated + to `sumologic.setup.job.image` and `fluentd.image`. + ## How to upgrade **Note: The below steps are using Helm 3. Helm 2 is not supported.** diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md index 639dc217fc..5fb0741a1e 100644 --- a/deploy/helm/sumologic/README.md +++ b/deploy/helm/sumologic/README.md @@ -11,9 +11,6 @@ The following table lists the configurable parameters of the Sumo Logic chart an Parameter | Description | Default --- | --- | --- -`image.repository` | Image repository for Sumo Logic docker container. | `sumologic/kubernetes-fluentd` -`image.tag` | Image tag for Sumo Logic docker container. | `1.0.0-rc.2` -`image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` `nameOverride` | Used to override the Chart name. | `Nil` `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` `sumologic.cleanUpEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` @@ -39,7 +36,13 @@ Parameter | Description | Default `sumologic.setup.job.annotations` | Annotations for the Job. | `[{"helm.sh/hook":"pre-install,pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation,hook-succeeded","helm.sh/hook-weight":"3"}]` `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. | `{}` `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` +`sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container.
| `sumologic/kubernetes-fluentd` +`sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `1.3.0` +`sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` `sumologic.setup.serviceAccount.annotations` | Annotations for the ServiceAccount. | `[{"helm.sh/hook":"pre-install,pre-upgrade","helm.sh/hook-delete-policy":"before-hook-creation,hook-succeeded","helm.sh/hook-weight":"0"}]` +`fluentd.image.repository` | Image repository for Sumo Logic docker container. | `sumologic/kubernetes-fluentd` +`fluentd.image.tag` | Image tag for Sumo Logic docker container. | `1.3.0` +`fluentd.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` `fluentd.additionalPlugins` | Additional Fluentd plugins to install from RubyGems. Please see our [documentation](./Additional_Fluentd_Plugins.md) for more information. | `[]` `fluentd.compression.enabled` | Flag to control if data is sent to Sumo Logic compressed or not | `true` `fluentd.compression.encoding` | Specifies which encoding should be used to compress data (either `gzip` or `deflate`) | `gzip` diff --git a/deploy/helm/sumologic/templates/cleanup/cleanup-job.yaml b/deploy/helm/sumologic/templates/cleanup/cleanup-job.yaml index 06c89b39a5..42e16cdcf4 100644 --- a/deploy/helm/sumologic/templates/cleanup/cleanup-job.yaml +++ b/deploy/helm/sumologic/templates/cleanup/cleanup-job.yaml @@ -40,8 +40,8 @@ spec: mountPath: /etc/terraform containers: - name: cleanup - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ .Values.sumologic.setup.job.image.repository }}:{{ .Values.sumologic.setup.job.image.tag }} + imagePullPolicy: {{ .Values.sumologic.setup.job.image.pullPolicy }} volumeMounts: - name: cleanup mountPath: /etc/terraform diff --git a/deploy/helm/sumologic/templates/events-statefulset.yaml b/deploy/helm/sumologic/templates/events-statefulset.yaml index 0a112078f8..f257a1a0c8 100644 --- a/deploy/helm/sumologic/templates/events-statefulset.yaml +++ b/deploy/helm/sumologic/templates/events-statefulset.yaml @@ -61,8 +61,8 @@ spec: {{- end }} containers: - name: fluentd-events - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ .Values.fluentd.image.repository }}:{{ .Values.fluentd.image.tag }} + imagePullPolicy: {{ .Values.fluentd.image.pullPolicy }} resources: {{- toYaml .Values.fluentd.events.statefulset.resources | nindent 10 }} volumeMounts: diff --git a/deploy/helm/sumologic/templates/metrics-statefulset.yaml b/deploy/helm/sumologic/templates/metrics-statefulset.yaml index b6833bf817..4606a333e2 100644 --- a/deploy/helm/sumologic/templates/metrics-statefulset.yaml +++ b/deploy/helm/sumologic/templates/metrics-statefulset.yaml @@ -101,8 +101,8 @@ spec: {{- end }} containers: - name: fluentd - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ .Values.fluentd.image.repository }}:{{ .Values.fluentd.image.tag }} + imagePullPolicy: {{ .Values.fluentd.image.pullPolicy }} resources: {{- toYaml .Values.fluentd.metrics.statefulset.resources | nindent 10 }} ports: diff --git a/deploy/helm/sumologic/templates/setup/setup-job.yaml b/deploy/helm/sumologic/templates/setup/setup-job.yaml index e5b63ec81d..0e3526e072 100644 --- a/deploy/helm/sumologic/templates/setup/setup-job.yaml +++ b/deploy/helm/sumologic/templates/setup/setup-job.yaml 
@@ -43,8 +43,8 @@ spec: defaultMode: 0777 containers: - name: setup - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ .Values.sumologic.setup.job.image.repository }}:{{ .Values.sumologic.setup.job.image.tag }} + imagePullPolicy: {{ .Values.sumologic.setup.job.image.pullPolicy }} command: ["/etc/terraform/setup.sh"] resources: {{- toYaml .Values.sumologic.setup.job.resources | nindent 10 }} diff --git a/deploy/helm/sumologic/templates/statefulset.yaml b/deploy/helm/sumologic/templates/statefulset.yaml index 5a3e599419..8194f59418 100644 --- a/deploy/helm/sumologic/templates/statefulset.yaml +++ b/deploy/helm/sumologic/templates/statefulset.yaml @@ -101,8 +101,8 @@ spec: {{- end }} containers: - name: fluentd - image: {{ .Values.image.repository }}:{{ .Values.image.tag }} - imagePullPolicy: {{ .Values.image.pullPolicy }} + image: {{ .Values.fluentd.image.repository }}:{{ .Values.fluentd.image.tag }} + imagePullPolicy: {{ .Values.fluentd.image.pullPolicy }} resources: {{- toYaml .Values.fluentd.logs.statefulset.resources | nindent 10 }} ports: diff --git a/deploy/helm/sumologic/upgrade-1.0.0.sh b/deploy/helm/sumologic/upgrade-1.0.0.sh index 75ce3a8b3f..4364507e02 100755 --- a/deploy/helm/sumologic/upgrade-1.0.0.sh +++ b/deploy/helm/sumologic/upgrade-1.0.0.sh @@ -11,7 +11,7 @@ readonly PREVIOUS_VERSION=0.17 readonly TEMP_FILE=upgrade-1.0.0-temp-file readonly MIN_BASH_VERSION=4.0 -readonly MIN_YQ_VERSION=3.2.1 +readonly MIN_YQ_VERSION=3.4.1 readonly KEY_MAPPINGS=" eventsDeployment.nodeSelector:fluentd.events.statefulset.nodeSelector @@ -435,7 +435,7 @@ prometheus-operator: - ${NAMESPACE} selector: matchLabels: - app: ${HELM_RELEASE_NAME}-${NAMESPACE}-fluentd-events" | yq m -a -i "${TEMP_FILE}" - + app: ${HELM_RELEASE_NAME}-${NAMESPACE}-fluentd-events" | yq m -a=append -i "${TEMP_FILE}" - fi if [[ -n "$(yq r "${TEMP_FILE}" -- prometheus-operator.prometheus.prometheusSpec.containers)" ]]; then diff --git a/deploy/helm/sumologic/upgrade-2.0.0.sh b/deploy/helm/sumologic/upgrade-2.0.0.sh index 29092cbda5..4a10317cef 100755 --- a/deploy/helm/sumologic/upgrade-2.0.0.sh +++ b/deploy/helm/sumologic/upgrade-2.0.0.sh @@ -18,6 +18,12 @@ prometheus-operator.prometheusOperator.tlsProxy.enabled:kube-prometheus-stack.pr readonly KEY_VALUE_MAPPINGS=" " +readonly KEY_MAPPINGS_MULTIPLE=" +image.repository:fluentd.image.repository:sumologic.setup.job.image.repository +image.tag:fluentd.image.tag:sumologic.setup.job.image.tag +image.pullPolicy:fluentd.image.pullPolicy:sumologic.setup.job.image.pullPolicy +" + readonly KEYS_TO_DELETE=" prometheus-operator " @@ -114,6 +120,11 @@ function create_temp_file() { } function migrate_prometheus_operator_to_kube_prometheus_stack() { + # Nothing to migrate, return + if [[ -z $(yq r "${TEMP_FILE}" prometheus-operator) ]] ; then + return + fi + info "Migrating prometheus-config-reloader container to config-reloader in prometheusSpec" yq m -i --arrays append \ "${TEMP_FILE}" \ @@ -152,6 +163,8 @@ function migrate_customer_keys() { readonly MAPPINGS IFS=$'\n' read -r -d ' ' -a MAPPINGS_KEY_VALUE <<< "${KEY_VALUE_MAPPINGS}" readonly MAPPINGS_KEY_VALUE + IFS=$'\n' read -r -d ' ' -a MAPPINGS_MULTIPLE <<< "${KEY_MAPPINGS_MULTIPLE}" + readonly MAPPINGS_MULTIPLE set -e readonly CUSTOMER_KEYS=$(yq --printMode p r "${OLD_VALUES_YAML}" -- '**') @@ -167,6 +180,19 @@ function migrate_customer_keys() { yq d -i "${TEMP_FILE}" -- "${maps[0]}" fi done + elif [[ ${MAPPINGS_MULTIPLE[*]} =~ ${key}: ]]; then + # whatever 
key matched one of the one-to-many mappings, write its value to every new key and remove the old one + info "Mapping ${key} into:" + for i in "${MAPPINGS_MULTIPLE[@]}"; do + IFS=':' read -r -a maps <<< "${i}" + if [[ ${maps[0]} == "${key}" ]]; then + for element in "${maps[@]:1}"; do + info "- ${element}" + yq w -i "${TEMP_FILE}" -- "${element}" "$(yq r "${OLD_VALUES_YAML}" -- "${maps[0]}")" + yq d -i "${TEMP_FILE}" -- "${maps[0]}" + done + fi + done else yq w -i "${TEMP_FILE}" -- "${key}" "$(yq r "${OLD_VALUES_YAML}" -- "${key}")" fi @@ -195,7 +221,6 @@ function migrate_pre_upgrade_hook() { fi } - function get_regex() { # Get regex from old yaml file and strip `'` and `"` from beginning/end of it local write_index="${1}" @@ -208,8 +233,10 @@ function check_user_image() { readonly USER_VERSION="$(yq r "${OLD_VALUES_YAML}" -- image.tag)" if [[ -n "${USER_VERSION}" ]]; then if [[ "${USER_VERSION}" =~ ^"${PREVIOUS_VERSION}"\.[[:digit:]]+$ ]]; then - yq w -i "${TEMP_FILE}" -- image.tag 2.0.0 - info "Changing image.tag from '${USER_VERSION}' to '2.0.0'" + info "Migrating from image.tag '${USER_VERSION}' to sumologic.setup.job.image.tag '2.0.0'" + yq w -i "${TEMP_FILE}" -- sumologic.setup.job.image.tag 2.0.0 + info "Migrating from image.tag '${USER_VERSION}' to fluentd.image.tag '2.0.0'" + yq w -i "${TEMP_FILE}" -- fluentd.image.tag 2.0.0 else warning "You are using unsupported version: ${USER_VERSION}" warning "Please upgrade to '${PREVIOUS_VERSION}.x' or ensure that new_values.yaml is valid" diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml index 9eb6488c30..694437a3cf 100644 --- a/deploy/helm/sumologic/values.yaml +++ b/deploy/helm/sumologic/values.yaml @@ -1,11 +1,6 @@ ## Sumo Logic Kubernetes Collection configuration file ## All the comments start with two or more # characters -image: - repository: sumologic/kubernetes-fluentd - tag: 1.3.0 - pullPolicy: IfNotPresent - nameOverride: "" sumologic: @@ -91,6 +86,10 @@ sumologic: setup: job: + image: + repository: sumologic/kubernetes-fluentd + tag: 1.3.0 + pullPolicy: IfNotPresent resources: limits: memory: 256Mi @@ -195,6 +194,11 @@ sumologic: spans_per_request: 100 fluentd: + image: + repository: sumologic/kubernetes-fluentd + tag: 1.3.0 + pullPolicy: IfNotPresent + ## Specifies whether a PodSecurityPolicy should be created podSecurityPolicy: create: false diff --git a/tests/run.sh b/tests/run.sh index 8fd95898d5..36518ca366 100755 --- a/tests/run.sh +++ b/tests/run.sh @@ -1,4 +1,4 @@ -#!/bin/bash +#!/usr/bin/env bash readonly SCRIPT_PATH="$( dirname "$(realpath "${0}")" )" readonly CONFIG_FILES=$(find "${SCRIPT_PATH}"/* -maxdepth 1 -name 'config.sh') diff --git a/tests/upgrade_script/static/chart_envs.output.yaml b/tests/upgrade_script/static/chart_envs.output.yaml index 55e5b8a7b8..e829fa18ab 100644 --- a/tests/upgrade_script/static/chart_envs.output.yaml +++ b/tests/upgrade_script/static/chart_envs.output.yaml @@ -1,14 +1,14 @@ fluent-bit: env: - - name: CHART - valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: fluentdLogs - - name: NAMESPACE - valueFrom: - fieldRef: - fieldPath: metadata.namespace + - name: CHART + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: fluentdLogs + - name: NAMESPACE + valueFrom: + fieldRef: + fieldPath: metadata.namespace prometheus-operator: prometheus: prometheusSpec: @@ -16,15 +16,15 @@ prometheus-operator: thanos: version: v0.10.0 containers: - - name: prometheus-config-reloader - env: - - name: CHART - valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: fluentdMetrics - - name: NAMESPACE -
valueFrom: - configMapKeyRef: - name: sumologic-configmap - key: fluentdNamespace + - name: prometheus-config-reloader + env: + - name: CHART + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: fluentdMetrics + - name: NAMESPACE + valueFrom: + configMapKeyRef: + name: sumologic-configmap + key: fluentdNamespace diff --git a/tests/upgrade_script/static/key_mapping_one_to_multiple.output.yaml b/tests/upgrade_script/static/key_mapping_one_to_multiple.output.yaml index 6981a7fc31..12d5047f5d 100644 --- a/tests/upgrade_script/static/key_mapping_one_to_multiple.output.yaml +++ b/tests/upgrade_script/static/key_mapping_one_to_multiple.output.yaml @@ -17,8 +17,7 @@ fluentd: enabled: fluentd.logs.autoscaling.enabled, fluentd.metrics.autoscaling.enabled maxReplicas: fluentd.logs.autoscaling.maxReplicas, fluentd.metrics.autoscaling.maxReplicas minReplicas: fluentd.logs.autoscaling.minReplicas, fluentd.metrics.autoscaling.minReplicas - targetCPUUtilizationPercentage: fluentd.logs.autoscaling.targetCPUUtilizationPercentage, - fluentd.metrics.autoscaling.targetCPUUtilizationPercentage + targetCPUUtilizationPercentage: fluentd.logs.autoscaling.targetCPUUtilizationPercentage, fluentd.metrics.autoscaling.targetCPUUtilizationPercentage metrics: statefulset: affinity: fluentd.logs.statefulset.affinity, fluentd.metrics.statefulset.affinity @@ -37,5 +36,4 @@ fluentd: enabled: fluentd.logs.autoscaling.enabled, fluentd.metrics.autoscaling.enabled maxReplicas: fluentd.logs.autoscaling.maxReplicas, fluentd.metrics.autoscaling.maxReplicas minReplicas: fluentd.logs.autoscaling.minReplicas, fluentd.metrics.autoscaling.minReplicas - targetCPUUtilizationPercentage: fluentd.logs.autoscaling.targetCPUUtilizationPercentage, - fluentd.metrics.autoscaling.targetCPUUtilizationPercentage + targetCPUUtilizationPercentage: fluentd.logs.autoscaling.targetCPUUtilizationPercentage, fluentd.metrics.autoscaling.targetCPUUtilizationPercentage diff --git a/tests/upgrade_script/static/prometheus_remote_write.output.yaml b/tests/upgrade_script/static/prometheus_remote_write.output.yaml index 9553a158fd..55c4d7b065 100644 --- a/tests/upgrade_script/static/prometheus_remote_write.output.yaml +++ b/tests/upgrade_script/static/prometheus_remote_write.output.yaml @@ -2,85 +2,85 @@ prometheus-operator: prometheus: prometheusSpec: remoteWrite: - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.state - writeRelabelConfigs: - - action: keep - regex: kube-state-metrics;(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.controller-manager - writeRelabelConfigs: - - action: keep - regex: kubelet;cloudprovider_.*_api_request_duration_seconds.* - 
sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.scheduler - writeRelabelConfigs: - - action: keep - regex: kube-scheduler;scheduler_(?:e2e_scheduling|binding|scheduling_algorithm)_latency_microseconds.* - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.apiserver - writeRelabelConfigs: - - action: keep - regex: apiserver;(?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds|latencies)_(?:count|sum)|apiserver_request_latencies_summary(?:|_count|_sum)|etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.kubelet - writeRelabelConfigs: - - action: keep - regex: kubelet;(?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)_count|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum)) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container - writeRelabelConfigs: - - action: labelmap - regex: container_name - replacement: container - - action: drop - regex: POD - sourceLabels: - - container - - action: keep - regex: kubelet;.+;(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes) - sourceLabels: - - job - - container - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container - writeRelabelConfigs: - - action: keep - regex: kubelet;(?:container_network_receive_bytes_total|container_network_transmit_bytes_total) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.node - writeRelabelConfigs: - - action: keep - regex: node-exporter;(?:node_load1|node_load5|node_load15|node_cpu_seconds_total) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.operator.rule - writeRelabelConfigs: - - action: keep - regex: 'cluster_quantile:apiserver_request_latencies:histogram_quantile|instance:node_filesystem_usage:sum|instance:node_network_receive_bytes:rate:sum|cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile|cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile|cluster_quantile:scheduler_binding_latency:histogram_quantile|node_namespace_pod:kube_pod_info:|:kube_pod_info_node_count:|node:node_num_cpu:sum|:node_cpu_utilisation:avg1m|node:node_cpu_utilisation:avg1m|node:cluster_cpu_utilisation:ratio|:node_cpu_saturation_load1:|node:node_cpu_saturation_load1:|:node_memory_utilisation:|node:node_memory_bytes_total:sum|node:node_memory_utilisation:ratio|node:cluster_memory_utilisation:ratio|:node_memory_swap_io_bytes:sum_rate|node:node_memory_utilisation:|node:node_memory_utilisation_2:|node:node_memory_swap_io_bytes:sum_rate|:node_disk_utilisation:avg_irate|node:node_disk_utilisation:avg_irate|:node_disk_saturation:avg_irate|node:node_disk_saturation:avg_irate|node:node_filesystem_usage:|node:node_filesystem_avail:|:node_net_utilisation:sum_irate|node:node_net_utilisation:sum_irate|:node_net_saturation:sum_irate|node:node_net_saturation:sum_irate|node:node_inodes_total:|node:node_inodes_free:' - sourceLabels: - - 
__name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.custom - writeRelabelConfigs: - - action: keep - regex: (?:custom_metric.*) - sourceLabels: - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics - writeRelabelConfigs: - - action: keep - regex: (?:up|prometheus_remote_storage_.*|fluentd_.*|fluentbit.*|otelcol.*) - sourceLabels: - - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.state + writeRelabelConfigs: + - action: keep + regex: kube-state-metrics;(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_deployment_spec_replicas|kube_deployment_status_replicas_available|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.controller-manager + writeRelabelConfigs: + - action: keep + regex: kubelet;cloudprovider_.*_api_request_duration_seconds.* + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.scheduler + writeRelabelConfigs: + - action: keep + regex: kube-scheduler;scheduler_(?:e2e_scheduling|binding|scheduling_algorithm)_latency_microseconds.* + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.apiserver + writeRelabelConfigs: + - action: keep + regex: apiserver;(?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds|latencies)_(?:count|sum)|apiserver_request_latencies_summary(?:|_count|_sum)|etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.kubelet + writeRelabelConfigs: + - action: keep + regex: kubelet;(?:kubelet_docker_operations_errors(?:|_total)|kubelet_(?:docker|runtime)_operations_duration_seconds_(?:count|sum)|kubelet_running_(?:container|pod)_count|kubelet_(:?docker|runtime)_operations_latency_microseconds(?:|_count|_sum)) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container + writeRelabelConfigs: + - action: labelmap + regex: container_name + replacement: container + - action: drop + regex: POD + sourceLabels: + - container + - action: keep + regex: kubelet;.+;(?:container_cpu_usage_seconds_total|container_memory_working_set_bytes|container_fs_usage_bytes|container_fs_limit_bytes) + sourceLabels: + - job + - container + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container + writeRelabelConfigs: + - action: keep + regex: kubelet;(?:container_network_receive_bytes_total|container_network_transmit_bytes_total) + sourceLabels: + - job + - __name__ + - url: 
http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.node + writeRelabelConfigs: + - action: keep + regex: node-exporter;(?:node_load1|node_load5|node_load15|node_cpu_seconds_total) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.operator.rule + writeRelabelConfigs: + - action: keep + regex: 'cluster_quantile:apiserver_request_latencies:histogram_quantile|instance:node_filesystem_usage:sum|instance:node_network_receive_bytes:rate:sum|cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile|cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile|cluster_quantile:scheduler_binding_latency:histogram_quantile|node_namespace_pod:kube_pod_info:|:kube_pod_info_node_count:|node:node_num_cpu:sum|:node_cpu_utilisation:avg1m|node:node_cpu_utilisation:avg1m|node:cluster_cpu_utilisation:ratio|:node_cpu_saturation_load1:|node:node_cpu_saturation_load1:|:node_memory_utilisation:|node:node_memory_bytes_total:sum|node:node_memory_utilisation:ratio|node:cluster_memory_utilisation:ratio|:node_memory_swap_io_bytes:sum_rate|node:node_memory_utilisation:|node:node_memory_utilisation_2:|node:node_memory_swap_io_bytes:sum_rate|:node_disk_utilisation:avg_irate|node:node_disk_utilisation:avg_irate|:node_disk_saturation:avg_irate|node:node_disk_saturation:avg_irate|node:node_filesystem_usage:|node:node_filesystem_avail:|:node_net_utilisation:sum_irate|node:node_net_utilisation:sum_irate|:node_net_saturation:sum_irate|node:node_net_saturation:sum_irate|node:node_inodes_total:|node:node_inodes_free:' + sourceLabels: + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.custom + writeRelabelConfigs: + - action: keep + regex: (?:custom_metric.*) + sourceLabels: + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics + writeRelabelConfigs: + - action: keep + regex: (?:up|prometheus_remote_storage_.*|fluentd_.*|fluentbit.*|otelcol.*) + sourceLabels: + - __name__ diff --git a/tests/upgrade_script/static/prometheus_remote_write_modified.output.yaml b/tests/upgrade_script/static/prometheus_remote_write_modified.output.yaml index ab399800b1..c08cb32134 100644 --- a/tests/upgrade_script/static/prometheus_remote_write_modified.output.yaml +++ b/tests/upgrade_script/static/prometheus_remote_write_modified.output.yaml @@ -2,85 +2,85 @@ prometheus-operator: prometheus: prometheusSpec: remoteWrite: - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.state - writeRelabelConfigs: - - action: keep - regex: 
kube-state-metrics;(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_daemonset_metadata_generation|kube_deployment_metadata_generation|kube_deployment_spec_paused|kube_deployment_spec_replicas|kube_deployment_spec_strategy_rollingupdate_max_unavailable|kube_deployment_status_replicas_available|kube_deployment_status_observed_generation|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_spec_unschedulable|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|custom) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.controller-manager - writeRelabelConfigs: - - action: keep - regex: kubelet;cloudprovider_.*_api_request_duration_seconds.* - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.scheduler - writeRelabelConfigs: - - action: keep - regex: kube-scheduler;scheduler_(?:e2e_scheduling|binding|scheduling_algorithm)_latency_microseconds.* - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.apiserver - writeRelabelConfigs: - - action: keep - regex: apiserver;(?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds|latencies)_(?:count|sum)|apiserver_request_latencies_summary(?:|_count|_sum)|etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.kubelet - writeRelabelConfigs: - - action: keep - regex: kubelet;(?:kubelet_docker_operations_errors|kubelet_docker_operations_latency_microseconds|kubelet_running_container_count|kubelet_running_pod_count|kubelet_runtime_operations_latency_microseconds.*|custom) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container - writeRelabelConfigs: - - action: labelmap - regex: container_name - replacement: container - - action: drop - regex: POD - sourceLabels: - - container - - action: keep - regex: kubelet;.+;(?:container_cpu_load_average_10s|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_cfs_throttled_seconds_total|container_memory_usage_bytes|container_memory_swap|container_memory_working_set_bytes|container_spec_memory_limit_bytes|container_spec_memory_swap_limit_bytes|container_spec_memory_reservation_limit_bytes|container_spec_cpu_quota|container_spec_cpu_period|container_fs_usage_bytes|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_writes_bytes_total|custom) - sourceLabels: - - job - - container - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container - writeRelabelConfigs: - - action: keep - regex: kubelet;(?:container_network_receive_bytes_total|container_network_transmit_bytes_total) - 
sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.node - writeRelabelConfigs: - - action: keep - regex: node-exporter;(?:node_load1|node_load5|node_load15|node_cpu_seconds_total) - sourceLabels: - - job - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.operator.rule - writeRelabelConfigs: - - action: keep - regex: 'cluster_quantile:apiserver_request_latencies:histogram_quantile|instance:node_filesystem_usage:sum|instance:node_network_receive_bytes:rate:sum|cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile|cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile|cluster_quantile:scheduler_binding_latency:histogram_quantile|node_namespace_pod:kube_pod_info:|:kube_pod_info_node_count:|node:node_num_cpu:sum|:node_cpu_utilisation:avg1m|node:node_cpu_utilisation:avg1m|node:cluster_cpu_utilisation:ratio|:node_cpu_saturation_load1:|node:node_cpu_saturation_load1:|:node_memory_utilisation:|node:node_memory_bytes_total:sum|node:node_memory_utilisation:ratio|node:cluster_memory_utilisation:ratio|:node_memory_swap_io_bytes:sum_rate|node:node_memory_utilisation:|node:node_memory_utilisation_2:|node:node_memory_swap_io_bytes:sum_rate|:node_disk_utilisation:avg_irate|node:node_disk_utilisation:avg_irate|:node_disk_saturation:avg_irate|node:node_disk_saturation:avg_irate|node:node_filesystem_usage:|node:node_filesystem_avail:|:node_net_utilisation:sum_irate|node:node_net_utilisation:sum_irate|:node_net_saturation:sum_irate|node:node_net_saturation:sum_irate|node:node_inodes_total:|node:node_inodes_free:' - sourceLabels: - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.custom - writeRelabelConfigs: - - action: keep - regex: (?:custom_metric.*) - sourceLabels: - - __name__ - - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics - writeRelabelConfigs: - - action: keep - regex: (?:up|prometheus_remote_storage_.*|fluentd_.*|fluentbit.*|otelcol.*) - sourceLabels: - - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.state + writeRelabelConfigs: + - action: keep + regex: kube-state-metrics;(?:kube_statefulset_status_observed_generation|kube_statefulset_status_replicas|kube_statefulset_replicas|kube_statefulset_metadata_generation|kube_daemonset_status_current_number_scheduled|kube_daemonset_status_desired_number_scheduled|kube_daemonset_status_number_misscheduled|kube_daemonset_status_number_unavailable|kube_daemonset_metadata_generation|kube_deployment_metadata_generation|kube_deployment_spec_paused|kube_deployment_spec_replicas|kube_deployment_spec_strategy_rollingupdate_max_unavailable|kube_deployment_status_replicas_available|kube_deployment_status_observed_generation|kube_deployment_status_replicas_unavailable|kube_node_info|kube_node_spec_unschedulable|kube_node_status_allocatable|kube_node_status_capacity|kube_node_status_condition|kube_pod_container_info|kube_pod_container_resource_requests|kube_pod_container_resource_limits|kube_pod_container_status_ready|kube_pod_container_status_terminated_reason|kube_pod_container_status_waiting_reason|kube_pod_container_status_restarts_total|kube_pod_status_phase|custom) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.controller-manager + writeRelabelConfigs: + - action: keep + regex: kubelet;cloudprovider_.*_api_request_duration_seconds.* + 
sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.scheduler + writeRelabelConfigs: + - action: keep + regex: kube-scheduler;scheduler_(?:e2e_scheduling|binding|scheduling_algorithm)_latency_microseconds.* + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.apiserver + writeRelabelConfigs: + - action: keep + regex: apiserver;(?:apiserver_request_(?:count|total)|apiserver_request_(?:duration_seconds|latencies)_(?:count|sum)|apiserver_request_latencies_summary(?:|_count|_sum)|etcd_request_cache_(?:add|get)_(?:duration_seconds|latencies_summary)_(?:count|sum)|etcd_helper_cache_(?:hit|miss)_(?:count|total)) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.kubelet + writeRelabelConfigs: + - action: keep + regex: kubelet;(?:kubelet_docker_operations_errors|kubelet_docker_operations_latency_microseconds|kubelet_running_container_count|kubelet_running_pod_count|kubelet_runtime_operations_latency_microseconds.*|custom) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container + writeRelabelConfigs: + - action: labelmap + regex: container_name + replacement: container + - action: drop + regex: POD + sourceLabels: + - container + - action: keep + regex: kubelet;.+;(?:container_cpu_load_average_10s|container_cpu_system_seconds_total|container_cpu_usage_seconds_total|container_cpu_cfs_throttled_seconds_total|container_memory_usage_bytes|container_memory_swap|container_memory_working_set_bytes|container_spec_memory_limit_bytes|container_spec_memory_swap_limit_bytes|container_spec_memory_reservation_limit_bytes|container_spec_cpu_quota|container_spec_cpu_period|container_fs_usage_bytes|container_fs_limit_bytes|container_fs_reads_bytes_total|container_fs_writes_bytes_total|custom) + sourceLabels: + - job + - container + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.container + writeRelabelConfigs: + - action: keep + regex: kubelet;(?:container_network_receive_bytes_total|container_network_transmit_bytes_total) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.node + writeRelabelConfigs: + - action: keep + regex: node-exporter;(?:node_load1|node_load5|node_load15|node_cpu_seconds_total) + sourceLabels: + - job + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.operator.rule + writeRelabelConfigs: + - action: keep + regex: 
'cluster_quantile:apiserver_request_latencies:histogram_quantile|instance:node_filesystem_usage:sum|instance:node_network_receive_bytes:rate:sum|cluster_quantile:scheduler_e2e_scheduling_latency:histogram_quantile|cluster_quantile:scheduler_scheduling_algorithm_latency:histogram_quantile|cluster_quantile:scheduler_binding_latency:histogram_quantile|node_namespace_pod:kube_pod_info:|:kube_pod_info_node_count:|node:node_num_cpu:sum|:node_cpu_utilisation:avg1m|node:node_cpu_utilisation:avg1m|node:cluster_cpu_utilisation:ratio|:node_cpu_saturation_load1:|node:node_cpu_saturation_load1:|:node_memory_utilisation:|node:node_memory_bytes_total:sum|node:node_memory_utilisation:ratio|node:cluster_memory_utilisation:ratio|:node_memory_swap_io_bytes:sum_rate|node:node_memory_utilisation:|node:node_memory_utilisation_2:|node:node_memory_swap_io_bytes:sum_rate|:node_disk_utilisation:avg_irate|node:node_disk_utilisation:avg_irate|:node_disk_saturation:avg_irate|node:node_disk_saturation:avg_irate|node:node_filesystem_usage:|node:node_filesystem_avail:|:node_net_utilisation:sum_irate|node:node_net_utilisation:sum_irate|:node_net_saturation:sum_irate|node:node_net_saturation:sum_irate|node:node_inodes_total:|node:node_inodes_free:' + sourceLabels: + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics.custom + writeRelabelConfigs: + - action: keep + regex: (?:custom_metric.*) + sourceLabels: + - __name__ + - url: http://$(CHART).$(NAMESPACE).svc.cluster.local:9888/prometheus.metrics + writeRelabelConfigs: + - action: keep + regex: (?:up|prometheus_remote_storage_.*|fluentd_.*|fluentbit.*|otelcol.*) + sourceLabels: + - __name__ diff --git a/tests/upgrade_script/static/prometheus_service_monitors.output.yaml b/tests/upgrade_script/static/prometheus_service_monitors.output.yaml index 34dbe60ed7..1eae716a52 100644 --- a/tests/upgrade_script/static/prometheus_service_monitors.output.yaml +++ b/tests/upgrade_script/static/prometheus_service_monitors.output.yaml @@ -1,70 +1,70 @@ prometheus-operator: prometheus: additionalServiceMonitors: - - name: my-service-monitor - additionalLabels: - app: some-app - endpoints: - - port: 8080 - namespaceSelector: - matchNames: - - test-app - selector: - matchLabels: - test: service-monitors - - name: collection-fluent-bit - additionalLabels: - app: collection-fluent-bit - endpoints: - - port: metrics - path: /api/v1/metrics/prometheus - namespaceSelector: - matchNames: - - sumologic - selector: - matchLabels: - app: fluent-bit - - name: collection-sumologic-otelcol - additionalLabels: - app: collection-sumologic-otelcol - endpoints: - - port: metrics - namespaceSelector: - matchNames: - - sumologic - selector: - matchLabels: + - name: my-service-monitor + additionalLabels: + app: some-app + endpoints: + - port: 8080 + namespaceSelector: + matchNames: + - test-app + selector: + matchLabels: + test: service-monitors + - name: collection-fluent-bit + additionalLabels: + app: collection-fluent-bit + endpoints: + - port: metrics + path: /api/v1/metrics/prometheus + namespaceSelector: + matchNames: + - sumologic + selector: + matchLabels: + app: fluent-bit + - name: collection-sumologic-otelcol + additionalLabels: app: collection-sumologic-otelcol - - name: collection-sumologic-fluentd-logs - additionalLabels: - app: collection-sumologic-fluentd-logs - endpoints: - - port: metrics - namespaceSelector: - matchNames: - - sumologic - selector: - matchLabels: + endpoints: + - port: metrics + namespaceSelector: + matchNames: + - sumologic + 
selector: + matchLabels: + app: collection-sumologic-otelcol + - name: collection-sumologic-fluentd-logs + additionalLabels: app: collection-sumologic-fluentd-logs - - name: collection-sumologic-fluentd-metrics - additionalLabels: - app: collection-sumologic-fluentd-metrics - endpoints: - - port: metrics - namespaceSelector: - matchNames: - - sumologic - selector: - matchLabels: + endpoints: + - port: metrics + namespaceSelector: + matchNames: + - sumologic + selector: + matchLabels: + app: collection-sumologic-fluentd-logs + - name: collection-sumologic-fluentd-metrics + additionalLabels: app: collection-sumologic-fluentd-metrics - - name: collection-sumologic-fluentd-events - additionalLabels: - app: collection-sumologic-fluentd-events - endpoints: - - port: metrics - namespaceSelector: - matchNames: - - sumologic - selector: - matchLabels: + endpoints: + - port: metrics + namespaceSelector: + matchNames: + - sumologic + selector: + matchLabels: + app: collection-sumologic-fluentd-metrics + - name: collection-sumologic-fluentd-events + additionalLabels: app: collection-sumologic-fluentd-events + endpoints: + - port: metrics + namespaceSelector: + matchNames: + - sumologic + selector: + matchLabels: + app: collection-sumologic-fluentd-events diff --git a/tests/upgrade_v2_script/static/image_rename.input.yaml b/tests/upgrade_v2_script/static/image_rename.input.yaml new file mode 100644 index 0000000000..0dc157a70e --- /dev/null +++ b/tests/upgrade_v2_script/static/image_rename.input.yaml @@ -0,0 +1,29 @@ +image: + repository: sumologic/kubernetes-fluentd + tag: 1.3.0 + pullPolicy: IfNotPresent + +nameOverride: "" + +sumologic: + ### Setup + ## If enabled, a pre-install hook will create Collector and Sources in Sumo Logic + setupEnabled: true + ## If enabled, a pre-delete hook will destroy Collector in Sumo Logic + cleanUpEnabled: false + ## Sumo access ID + accessId: "dummy" + ## Sumo access key + accessKey: "dummy" + +fluentd: + ## Sets the fluentd log level. The default log level, if not specified, is info. + ## Sumo will only ingest the error log level and some specific warnings, the info logs can be seen in kubectl logs. + ## ref: https://docs.fluentd.org/deployment/logging + logLevel: "info" + ## to ingest all fluentd logs, turn the logLevelFilter to false + logLevelFilter: true + ## Enable and set compression encoding for fluentd output plugin + compression: + enabled: true + encoding: gzip diff --git a/tests/upgrade_v2_script/static/image_rename.log b/tests/upgrade_v2_script/static/image_rename.log new file mode 100644 index 0000000000..d7269f1c00 --- /dev/null +++ b/tests/upgrade_v2_script/static/image_rename.log @@ -0,0 +1,16 @@ +[INFO] Mapping image.repository into: +[INFO] - fluentd.image.repository +[INFO] - sumologic.setup.job.image.repository +[INFO] Mapping image.tag into: +[INFO] - fluentd.image.tag +[INFO] - sumologic.setup.job.image.tag +[INFO] Mapping image.pullPolicy into: +[INFO] - fluentd.image.pullPolicy +[INFO] - sumologic.setup.job.image.pullPolicy + +[INFO] Updating setup hooks (sumologic.setup.*.annotations[helm.sh/hook]) to 'pre-install,pre-upgrade' +[INFO] Migrating from image.tag '1.3.0' to sumologic.setup.job.image.tag '2.0.0' +[INFO] Migrating from image.tag '1.3.0' to fluentd.image.tag '2.0.0' + +Thank you for upgrading to v2.0.0 of the Sumo Logic Kubernetes Collection Helm chart. +A new yaml file has been generated for you. Please check the current directory for new_values.yaml. 
diff --git a/tests/upgrade_v2_script/static/image_rename.output.yaml b/tests/upgrade_v2_script/static/image_rename.output.yaml new file mode 100644 index 0000000000..bf7a6c4317 --- /dev/null +++ b/tests/upgrade_v2_script/static/image_rename.output.yaml @@ -0,0 +1,24 @@ +fluentd: + image: + repository: sumologic/kubernetes-fluentd + tag: 2.0.0 + pullPolicy: IfNotPresent + logLevel: info + logLevelFilter: true + compression: + enabled: true + encoding: gzip +sumologic: + setup: + job: + image: + repository: sumologic/kubernetes-fluentd + tag: 2.0.0 + pullPolicy: IfNotPresent + annotations: + helm.sh/hook: pre-install,pre-upgrade + setupEnabled: true + cleanUpEnabled: false + accessId: dummy + accessKey: dummy +nameOverride: diff --git a/tests/upgrade_v2_script/static/prometheus_operator_valid.log b/tests/upgrade_v2_script/static/prometheus_operator_valid.log index ff5ecd5843..6629068fb0 100644 --- a/tests/upgrade_v2_script/static/prometheus_operator_valid.log +++ b/tests/upgrade_v2_script/static/prometheus_operator_valid.log @@ -1,8 +1,7 @@ [INFO] Mapping prometheus-operator.prometheusOperator.tlsProxy.enabled into kube-prometheus-stack.prometheusOperator.tls.enabled -[INFO] Migrating prometheus-config-reloader to config-reloader +[INFO] Migrating prometheus-config-reloader container to config-reloader in prometheusSpec [INFO] Migrating from prometheus-operator to kube-prometheus-stack -[INFO] falco will be disabled. Change "falco.enabled" to "true" if you want to enable it Thank you for upgrading to v2.0.0 of the Sumo Logic Kubernetes Collection Helm chart. A new yaml file has been generated for you. Please check the current directory for new_values.yaml. diff --git a/vagrant/Makefile b/vagrant/Makefile index db6fdb2383..84f13f9b36 100755 --- a/vagrant/Makefile +++ b/vagrant/Makefile @@ -28,8 +28,8 @@ tag-and-push-local: docker tag sumologic/kubernetes-fluentd:local localhost:32000/sumologic/kubernetes-fluentd:local-${timestamp} docker push localhost:32000/sumologic/kubernetes-fluentd:local-${timestamp} touch ${local_values_file} - yq w -i ${local_values_file} image.repository localhost:32000/sumologic/kubernetes-fluentd - yq w -i ${local_values_file} image.tag local-${timestamp} + yq w -i ${local_values_file} fluentd.image.repository localhost:32000/sumologic/kubernetes-fluentd + yq w -i ${local_values_file} fluentd.image.tag local-${timestamp} generate-local-config: ${vagrant_scripts_dir}generate-local-config.sh diff --git a/vagrant/provision.sh b/vagrant/provision.sh index dd512c972d..e2a627fdd8 100644 --- a/vagrant/provision.sh +++ b/vagrant/provision.sh @@ -51,11 +51,9 @@ ln -s /usr/bin/helm3 /usr/bin/helm usermod -a -G microk8s vagrant # install yq with access to file structure -curl https://github.com/mikefarah/yq/releases/download/3.2.1/yq_linux_amd64 -L -o /usr/local/bin/yq-3.2.1 -chmod +x /usr/local/bin/yq-3.2.1 -curl https://github.com/mikefarah/yq/releases/download/3.3.0/yq_linux_amd64 -L -o /usr/local/bin/yq-3.3.0 -chmod +x /usr/local/bin/yq-3.3.0 -ln -s /usr/local/bin/yq-3.3.0 /usr/local/bin/yq +curl https://github.com/mikefarah/yq/releases/download/3.4.1/yq_linux_amd64 -L -o /usr/local/bin/yq-3.4.1 +chmod +x /usr/local/bin/yq-3.4.1 +ln -s /usr/local/bin/yq-3.4.1 /usr/local/bin/yq # Install docker curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -