From 3829d572b2a93abe13d5325adf6c378c570ae894 Mon Sep 17 00:00:00 2001 From: Mateusz Starzec <87757793+starzu-sumo@users.noreply.github.com> Date: Fri, 6 May 2022 14:20:51 +0200 Subject: [PATCH] feat: sumo dashboards installation as a part of the setup job (#2268) * feat: k8s dashboards installation - chart values * feat: k8s dashboards installation - scripts * feat: k8s dashboards installation - tests * feat: k8s dashboards installation - changelog * feat: k8s dashboards installation - notes * feat: k8s dashboards installation - disabled dashboards test * feat: k8s dashboards - remove unused variable * feat: k8s dashboards - install the dashboards inside "Sumo Logic Integrations" * feat: k8s dashboards - shellcheck fixes * feat: k8s dashboards - shellcheck fixes (tests adjustment) * feat: k8s dashboards - include dashboards.sh in shellcheck.sh * feat: shellcheck - more info in logs * feat: k8s dashboards - adjust tests after merge --- CHANGELOG.md | 2 + ci/shellcheck.sh | 13 +- deploy/helm/sumologic/README.md | 707 +++++++++--------- .../helm/sumologic/conf/setup/dashboards.sh | 139 ++++ deploy/helm/sumologic/conf/setup/monitors.sh | 2 +- deploy/helm/sumologic/conf/setup/setup.sh | 9 + deploy/helm/sumologic/templates/NOTES.txt | 12 +- deploy/helm/sumologic/values.yaml | 4 + .../terraform/static/all_fields.output.yaml | 145 +++- .../static/collector_fields.output.yaml | 145 +++- .../static/conditional_sources.output.yaml | 145 +++- .../helm/terraform/static/custom.output.yaml | 145 +++- .../helm/terraform/static/default.output.yaml | 145 +++- .../disable_default_metrics.output.yaml | 145 +++- .../static/disabled_dashboards.input.yaml | 4 + .../static/disabled_dashboards.output.yaml | 636 ++++++++++++++++ .../static/disabled_monitors.output.yaml | 145 +++- ...itors_with_email_notifications.output.yaml | 145 +++- .../monitors_with_single_email.output.yaml | 145 +++- .../static/strip_extrapolation.output.yaml | 145 +++- .../helm/terraform/static/traces.output.yaml | 
145 +++- 21 files changed, 2752 insertions(+), 371 deletions(-) create mode 100755 deploy/helm/sumologic/conf/setup/dashboards.sh create mode 100644 tests/helm/terraform/static/disabled_dashboards.input.yaml create mode 100644 tests/helm/terraform/static/disabled_dashboards.output.yaml diff --git a/CHANGELOG.md b/CHANGELOG.md index 3de155b11b..9f82214ff1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,6 +11,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 - feat(fluentd): expose extra configuration for fluentd output plugin [#2244][#2244] - feat(monitors): the Sumo Logic monitors installation as part of the setup job [#2250][#2250], [#2274][#2274] +- feat(dashboards): the Sumo Logic dashboards installation as part of the setup job [#2268][#2268] ### Changed @@ -35,6 +36,7 @@ and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0 [#2246]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2246 [#2250]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2250 [#2251]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2251 +[#2268]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2268 [#2272]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2272 [#2274]: https://github.com/SumoLogic/sumologic-kubernetes-collection/pull/2274 [Unreleased]: https://github.com/SumoLogic/sumologic-kubernetes-collection/compare/v2.7.1...main diff --git a/ci/shellcheck.sh b/ci/shellcheck.sh index 4d8a6fd75d..1e42aa2750 100755 --- a/ci/shellcheck.sh +++ b/ci/shellcheck.sh @@ -13,20 +13,27 @@ find . ! -path '*deploy/helm/sumologic/conf/setup/setup.sh' ! -path '*deploy/hel find . 
-path '*tests/helm/terraform/static/*.output.yaml' -type 'f' -print | while read -r file; do # Run tests in their own context - echo "Checking ${file} with shellcheck" + echo "Checking ${file} (setup.sh) with shellcheck" yq r "${file}" "data[setup.sh]" | shellcheck --enable all --external-sources --exclude SC2155 - done find . -path '*tests/helm/terraform/static/*.output.yaml' -type 'f' -print | while read -r file; do # Run tests in their own context - echo "Checking ${file} with shellcheck" + echo "Checking ${file} (monitors.sh) with shellcheck" yq r "${file}" "data[monitors.sh]" | shellcheck --enable all --external-sources --exclude SC2155 - done +find . -path '*tests/helm/terraform/static/*.output.yaml' -type 'f' -print | + while read -r file; do + # Run tests in their own context + echo "Checking ${file} (dashboards.sh) with shellcheck" + yq r "${file}" "data[dashboards.sh]" | shellcheck --enable all --external-sources --exclude SC2155 - + done + find . -path '*tests/helm/terraform_custom/static/*.output.yaml' ! -path "./tests/helm/terraform_custom/static/empty.output.yaml" -type 'f' -print | while read -r file; do # Run tests in their own context - echo "Checking ${file} with shellcheck" + echo "Checking ${file} (custom_setup.sh) with shellcheck" yq r "${file}" "data[custom_setup.sh]" | shellcheck --enable all --external-sources --exclude SC2155 - done diff --git a/deploy/helm/sumologic/README.md b/deploy/helm/sumologic/README.md index b91f6c5196..d954cd48f8 100644 --- a/deploy/helm/sumologic/README.md +++ b/deploy/helm/sumologic/README.md @@ -12,356 +12,357 @@ To see all available configuration for our sub-charts, please refer to their doc The following table lists the configurable parameters of the Sumo Logic chart and their default values. 
-| Parameter | Description | Default | -|---------------------------------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| -| `nameOverride` | Used to override the Chart name. | `Nil` | -| `fullnameOverride` | Used to override the chart's full name. 
| `Nil` | -| `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` | -| `sumologic.cleanupEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` | -| `sumologic.logs.enabled` | Set the enabled flag to false for disabling logs ingestion altogether. | `true` | -| `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` | -| `sumologic.logs.fields` | Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/Manage/Fields#Manage_fields) | `{}` | -| `sumologic.logs.metadata.provider` | Set provider to use for logs forwarding and metadata enrichment. Can be either otelcol or fluentd. | `fluentd` | -| `sumologic.metrics.metadata.provider` | Set provider to use for metrics forwarding and metadata enrichment. Can be either otelcol or fluentd. | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.config.workerCountAutotune` | This feature autodetects how much CPU is assigned to the nginx instance and setsthe right amount of workers based on that. Disable to use the default of 8 workers. | | -| `sumologic.metrics.remoteWriteProxy.replicaCount` | Number of replicas in the remote write proxy deployment. | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.image` | Nginx docker image for the remote write proxy. 
| `fluentd` | -| `sumologic.metrics.remoteWriteProxy.resources` | Resource requests and limits for the remote write proxy container. | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.livenessProbe` | Liveness probe settings for the remote write proxy container. | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.readinessProbe` | Readiness probe settings for the remote write proxy container. | `fluentd` | -| `sumologic.metrics.remoteWriteProxy.securityContext` | The securityContext configuration for the remote write proxy. | `{}` | -| `sumologic.metrics.remoteWriteProxy.nodeSelector` | Node selector for the remote write proxy deployment. | `{}` | -| `sumologic.metrics.remoteWriteProxy.tolerations` | Tolerations for the remote write proxy deployment. | `[]` | -| `sumologic.metrics.remoteWriteProxy.affinity` | Affinity for the remote write proxy deployment. | `{}` | -| `sumologic.metrics.remoteWriteProxy.priorityClassName` | Priority class name for the remote write proxy deployment. | `Nil` | -| `sumologic.metrics.remoteWriteProxy.podLabels` | Additional labels for the remote write proxy container. | `{}` | -| `sumologic.metrics.remoteWriteProxy.podAnnotations` | Additional annotations for for the remote write proxy container. | `{}` | -| `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_ | `false` | -| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY | `sumo-api-secret` | -| `sumologic.accessId` | Sumo access ID. | `Nil` | -| `sumologic.accessKey` | Sumo access key. | `Nil` | -| `sumologic.endpoint` | Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection. 
| `Nil` | -| `sumologic.collectionMonitoring` | If set to `false`, excludes all metrics which name matches `/^up\|^prometheus_remote_storage_.*\|^fluentd_.*\|^fluentbit.*\|^otelcol.*$/` regexp and excludes all container logs and traces coming from collection namespace. | `false` | -| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` | -| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes. | `kubernetes` | -| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](../../docs/Terraform.md).. | [sumologic.collector.sources in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L114-L164) | -| `sumologic.httpProxy` | HTTP proxy URL | `Nil` | -| `sumologic.httpsProxy` | HTTPS proxy URL | `Nil` | -| `sumologic.noProxy` | List of comma separated hostnames which should be excluded from the proxy | `kubernetes.default.svc` | -| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `Nil` | -| `sumologic.podLabels` | Additional labels for the pods. | `{}` | -| `sumologic.podAnnotations` | Additional annotations for the pods. | `{}` | -| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` | -| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` | -| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `Nil` | -| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. | `{}` | -| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. 
| `{}` | -| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `sumologic/kubernetes-fluentd` | -| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `1.3.0` | -| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | -| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` | -| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` | -| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` | -| `fluentd.image.repository` | Image repository for Sumo Logic docker container. | `sumologic/kubernetes-fluentd` | -| `fluentd.image.tag` | Image tag for Sumo Logic docker container. | `1.3.0` | -| `fluentd.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | -| `fluentd.logLevelFilter` | Do not send fluentd logs if set to `true`. | `true` | -| `fluentd.additionalPlugins` | Additional Fluentd plugins to install from RubyGems. Please see our [documentation](../../docs/Additional_Fluentd_Plugins.md) for more information. | `[]` | -| `fluentd.compression.enabled` | Flag to control if data is sent to Sumo Logic compressed or not | `true` | -| `fluentd.compression.encoding` | Specifies which encoding should be used to compress data (either `gzip` or `deflate`) | `gzip` | -| `fluentd.logLevel` | Sets the fluentd log level. The default log level, if not specified, is info. Sumo will only ingest the error log level and some specific warnings, the info logs can be seen in kubectl logs. | `info` | -| `fluentd.verifySsl` | Verify SumoLogic HTTPS certificates. | `true` | -| `fluentd.proxyUri` | Proxy URI for sumologic output plugin. 
| `Nil` | -| `fluentd.securityContext` | The securityContext configuration for Fluentd | `{"fsGroup":999}` | -| `fluentd.podLabels` | Additional labels for all fluentd pods | `{}` | -| `fluentd.pvcLabels` | Additional labels for all fluentd PVCs | `{}` | -| `fluentd.podAnnotations` | Additional annotations for all fluentd pods | `{}` | -| `fluentd.podSecurityPolicy.create` | If true, create & use `podSecurityPolicy` for fluentd resources | `false` | -| `fluentd.persistence.enabled` | Persist data to a persistent volume; When enabled, fluentd uses the file buffer instead of memory buffer. After changing this value follow steps described in [Fluentd Persistence](../../docs/FluentdPersistence.md). | `true` | -| `fluentd.persistence.storageClass` | If defined, storageClassName: . If set to "-", storageClassName: "", which disables dynamic provisioning. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, Azure & OpenStack) | `Nil` | -| `fluentd.persistence.accessMode` | The accessMode for persistence. | `ReadWriteOnce` | -| `fluentd.persistence.size` | The size needed for persistence. | `10Gi` | -| `fluentd.buffer.type` | Option to specify the Fluentd buffer as file/memory. If `fluentd.persistence.enabled` is `true`, this will be ignored. | `memory` | -| `fluentd.buffer.flushInterval` | How frequently to push logs to Sumo Logic. | `5s` | -| `fluentd.buffer.numThreads` | Increase number of http threads to Sumo. May be required in heavy logging/high DPM clusters. | `8` | -| `fluentd.buffer.chunkLimitSize` | The max size of each chunks: events will be written into chunks until the size of chunks become this size. | `1m` | -| `fluentd.buffer.queueChunkLimitSize` | Limit the number of queued chunks. | `128` | -| `fluentd.buffer.totalLimitSize` | The size limitation of this buffer plugin instance. 
| `128m` | -| `fluentd.buffer.filePaths` | File paths to buffer to, if Fluentd buffer type is specified as file above. Each sumologic output plugin buffers to its own unique file. | `{"events":"/fluentd/buffer/events","logs":{"containers":"/fluentd/buffer/logs.containers","default":"/fluentd/buffer/logs.default","kubelet":"/fluentd/buffer/logs.kubelet","systemd":"/fluentd/buffer/logs.systemd"},"metrics":{"apiserver":"/fluentd/buffer/metrics.apiserver","container":"/fluentd/buffer/metrics.container","controller":"/fluentd/buffer/metrics.controller","default":"/fluentd/buffer/metrics.default","kubelet":"/fluentd/buffer/metrics.kubelet","node":"/fluentd/buffer/metrics.node","scheduler":"/fluentd/buffer/metrics.scheduler","state":"/fluentd/buffer/metrics.state"},"traces":"/fluentd/buffer/traces"}` | -| `fluentd.buffer.extraConf` | Additional config for buffer settings | `Nil` | -| `fluentd.metadata.addOwners` | Option to control the enrichment of logs and metrics with pod owner metadata like `daemonset`, `deployment`, `replicaset`, `statefulset`. | `true` | -| `fluentd.metadata.addService` | Option to control the enrichment of logs and metrics with `service` metadata. | `true` | -| `fluentd.metadata.annotation_match` | Option to control capturing of annotations by metadata filter plugin. | `['sumologic\.com.*']` | -| `fluentd.metadata.apiGroups` | List of supported kubernetes API groups. | `['apps/v1']` | -| `fluentd.metadata.apiServerUrl` | Option to specify custom API server URL instead of the default, that is taken from KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables. Example: `"https://kubernetes.default.svc:443"`. | `""` | -| `fluentd.metadata.coreApiVersions` | List of supported kubernetes API versions. | `['v1']` | -| `fluentd.metadata.cacheSize` | Option to control the enabling of metadata filter plugin cache_size. 
| `10000` | -| `fluentd.metadata.cacheTtl` | Option to control the enabling of metadata filter plugin cache_ttl (in seconds). | `7200` | -| `fluentd.metadata.cacheRefresh` | Option to control the interval at which metadata cache is asynchronously refreshed (in seconds). | `3600` | -| `fluentd.metadata.cacheRefreshVariation` | Option to control the variation in seconds by which the cacheRefresh option is changed for each pod separately. For example, if cache refresh is 1 hour and variation is 15 minutes, then actual cache refresh interval will be a random value between 45 minutes and 1 hour 15 minutes, different for each pod. This helps spread the load on API server that the cache refresh induces. Setting this to 0 disables cache refresh variation. | `900` | -| `fluentd.metadata.cacheRefreshApiserverRequestDelay` | Option to control the delay with which cache refresh calls hit the api server.For example, if 0 then all metadata enrichment happen immediately. Setting this to a non-zero values ensures the traffic to api server is more distributed. | `0` | -| `fluentd.metadata.cacheRefreshExcludePodRegex` | Option to add regex for selectively disabling refresh for metadata in fluentd cache. For example, if regex is `(command-[a-z0-9]*)` then all pods starting with name `command` will not have their metadata refreshed and will be cleaned up from cache | `''` | -| `fluentd.metadata.pluginLogLevel` | Option to give plugin specific log level. | `error` | -| `fluentd.logs.enabled` | Flag to control deploying the Fluentd logs statefulsets. | `true` | -| `fluentd.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment. statefulset. | `{"minAvailable": 2}` | -| `fluentd.logs.statefulset.nodeSelector` | Node selector for Fluentd log statefulset. | `{}` | -| `fluentd.logs.statefulset.tolerations` | Tolerations for Fluentd log statefulset. | `[]` | -| `fluentd.logs.statefulset.affinity` | Affinity for Fluentd log statefulset. 
| `{}` | -| `fluentd.logs.statefulset.podAntiAffinity` | PodAntiAffinity for Fluentd log statefulset. | `soft` | -| `fluentd.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for Fluentd logs metadata enrichment statefulset. | `[]` | -| `fluentd.logs.statefulset.replicaCount` | Replica count for Fluentd log statefulset. | `3` | -| `fluentd.logs.statefulset.resources` | Resources for Fluentd log statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | -| `fluentd.logs.statefulset.podLabels` | Additional labels for fluentd log pods. | `{}` | -| `fluentd.logs.statefulset.podAnnotations` | Additional annotations for fluentd log pods. | `{}` | -| `fluentd.logs.statefulset.priorityClassName` | Priority class name for fluentd log pods. | `Nil` | -| `fluentd.logs.statefulset.initContainers` | Define init containers that will be run for fluentd logs statefulset. | `[]` | -| `fluentd.logs.autoscaling.enabled` | Option to turn autoscaling on for fluentd and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `fluentd.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `fluentd.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` | -| `fluentd.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | -| `fluentd.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `fluentd.logs.rawConfig` | Default log configuration. | `@include common.conf @include logs.conf` | -| `fluentd.logs.output.logFormat` | Format to post logs into Sumo: fields, json, json_merge, or text. | `fields` | -| `fluentd.logs.output.addTimestamp` | Option to control adding timestamp to logs. | `true` | -| `fluentd.logs.output.timestampKey` | Field name when add_timestamp is on. 
| `timestamp` | -| `fluentd.logs.output.pluginLogLevel` | Option to give plugin specific log level. | `error` | -| `fluentd.logs.output.extraConf` | Additional config parameters for sumologic output plugin | `Nil` | -| `fluentd.logs.extraLogs` | Additional config for custom log pipelines. | `Nil` | -| `fluentd.logs.containers.overrideRawConfig` | To override the entire contents of logs.source.containers.conf file. Leave empty for the default pipeline. | `Nil` | -| `fluentd.logs.containers.outputConf` | Default output configuration for container logs. | `@include logs.output.conf` | -| `fluentd.logs.containers.overrideOutputConf` | Override output section for container logs. Leave empty for the default output section. | `Nil` | -| `fluentd.logs.containers.sourceName` | Set the _sourceName metadata field in Sumo Logic. | `%{namespace}.%{pod}.%{container}` | -| `fluentd.logs.containers.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `%{namespace}/%{pod_name}` | -| `fluentd.logs.containers.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` | -| `fluentd.logs.containers.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | -| `fluentd.logs.containers.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.containers.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.containers.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.containers.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. The logs will still be sent to FluentD. 
| `Nil` | -| `fluentd.logs.containers.k8sMetadataFilter.watch` | Option to control the enabling of metadata filter plugin watch. | `true` | -| `fluentd.logs.containers.k8sMetadataFilter.caFile` | path to CA file for Kubernetes server certificate validation. | `Nil` | -| `fluentd.logs.containers.k8sMetadataFilter.verifySsl` | Validate SSL certificates. | `true` | -| `fluentd.logs.containers.k8sMetadataFilter.clientCert` | Path to a client cert file to authenticate to the API server. | `Nil` | -| `fluentd.logs.containers.k8sMetadataFilter.clientKey` | Path to a client key file to authenticate to the API server. | `Nil` | -| `fluentd.logs.containers.k8sMetadataFilter.bearerTokenFile` | Path to a file containing the bearer token to use for authentication. | `Nil` | -| `fluentd.logs.containers.k8sMetadataFilter.tagToMetadataRegexp` | The regular expression used to extract kubernetes metadata (pod name, container name, namespace) from the current fluentd tag. | `.+?\.containers\.(?[^_]+)_(?[^_]+)_(?.+)-(?[a-z0-9]{64})\.log$` | -| `fluentd.logs.containers.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | -| `fluentd.logs.containers.extraOutputPluginConf` | To use additional output plugins. | `Nil` | -| `fluentd.logs.containers.perContainerAnnotationsEnabled` | Enable container-level pod annotations. See [fluent-plugin-kubernetes-sumologic documentation](https://github.com/SumoLogic/sumologic-kubernetes-fluentd/tree/v1.12.2-sumo-6/fluent-plugin-kubernetes-sumologic#container-level-pod-annotations_) for more details. | `false` | -| `fluentd.logs.input.forwardExtraConf` | Configuration for the forward input plugin that receives logs from FluentBit. | `` | -| `fluentd.logs.kubelet.enabled` | Collect kubelet logs. | `true` | -| `fluentd.logs.kubelet.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | -| `fluentd.logs.kubelet.extraOutputPluginConf` | To use additional output plugins. 
| `Nil` | -| `fluentd.logs.kubelet.outputConf` | Output configuration for kubelet. | `@include logs.output.conf` | -| `fluentd.logs.kubelet.overrideOutputConf` | Override output section for kubelet logs. Leave empty for the default output section. | `Nil` | -| `fluentd.logs.kubelet.sourceName` | Set the _sourceName metadata field in Sumo Logic. | `k8s_kubelet` | -| `fluentd.logs.kubelet.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `kubelet` | -| `fluentd.logs.kubelet.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` | -| `fluentd.logs.kubelet.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | -| `fluentd.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.systemd.enabled` | Collect systemd logs. | `true` | -| `fluentd.logs.systemd.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | -| `fluentd.logs.systemd.extraOutputPluginConf` | To use additional output plugins. | `Nil` | -| `fluentd.logs.systemd.outputConf` | Output configuration for systemd. | `@include logs.output.conf` | -| `fluentd.logs.systemd.overrideOutputConf` | Override output section for systemd logs. Leave empty for the default output section. | `Nil` | -| `fluentd.logs.systemd.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. 
| `system` | -| `fluentd.logs.systemd.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` | -| `fluentd.logs.systemd.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | -| `fluentd.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | -| `fluentd.logs.default.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | -| `fluentd.logs.default.extraOutputPluginConf` | To use additional output plugins. | `Nil` | -| `fluentd.logs.default.outputConf` | Default log configuration (catch-all). | `@include logs.output.conf` | -| `fluentd.logs.default.overrideOutputConf` | Override output section for untagged logs. Leave empty for the default output section. | `Nil` | -| `fluentd.metrics.enabled` | Flag to control deploying the Fluentd metrics statefulsets. | `true` | -| `fluentd.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment. statefulset. | `{"minAvailable": 2}` | -| `fluentd.metrics.statefulset.nodeSelector` | Node selector for Fluentd metrics statefulset. | `{}` | -| `fluentd.metrics.statefulset.tolerations` | Tolerations for Fluentd metrics statefulset. | `[]` | -| `fluentd.metrics.statefulset.affinity` | Affinity for Fluentd metrics statefulset. 
| `{}` | -| `fluentd.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for Fluentd metrics statefulset. | `soft` | -| `fluentd.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for Fluentd metrics metadata enrichment statefulset. | `[]` | -| `fluentd.metrics.statefulset.replicaCount` | Replica count for Fluentd metrics statefulset. | `3` | -| `fluentd.metrics.statefulset.resources` | Resources for Fluentd metrics statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | -| `fluentd.metrics.statefulset.podLabels` | Additional labels for fluentd metrics pods. | `{}` | -| `fluentd.metrics.statefulset.podAnnotations` | Additional annotations for fluentd metrics pods. | `{}` | -| `fluentd.metrics.statefulset.priorityClassName` | Priority class name for fluentd metrics pods. | `Nil` | -| `fluentd.metrics.statefulset.initContainers` | Define init containers that will be run for fluentd metrics statefulset. | `[]` | -| `fluentd.metrics.autoscaling.enabled` | Option to turn autoscaling on for fluentd and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `fluentd.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `fluentd.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` | -| `fluentd.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | -| `fluentd.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `fluentd.metrics.rawConfig` | Raw config for fluentd metrics. | `@include common.conf @include metrics.conf` | -| `fluentd.metrics.outputConf` | Configuration for sumologic output plugin. | `@include metrics.output.conf` | -| `fluentd.metrics.extraEnvVars` | Additional environment variables for metrics metadata enrichment pods. 
| `Nil` | -| `fluentd.metrics.extraVolumes` | Additional volumes for metrics metadata enrichment pods. | `Nil` | -| `fluentd.metrics.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment pods. | `Nil` | -| `fluentd.metrics.extraOutputConf` | Additional config parameters for sumologic output plugin | `Nil` | -| `fluentd.metrics.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | -| `fluentd.metrics.extraOutputPluginConf` | To use additional output plugins. | `Nil` | -| `fluentd.metrics.overrideOutputConf` | Override output section for metrics. Leave empty for the default output section. | `Nil` | -| `fluentd.monitoring` | Configuration of fluentd monitoring metrics. Adds the `fluentd_input_status_num_records_total` metric for input and the `fluentd_output_status_num_records_total` metric for output. | `{"input": false, "output": false}` | -| `fluentd.events.enabled` | If enabled, collect K8s events. | `true` | -| `fluentd.events.statefulset.nodeSelector` | Node selector for Fluentd events statefulset. | `{}` | -| `fluentd.events.statefulset.affinity` | Affinity for Fluentd events statefulset. | `{}` | -| `fluentd.events.statefulset.tolerations` | Tolerations for Fluentd events statefulset. | `[]` | -| `fluentd.events.statefulset.resources` | Resources for Fluentd log statefulset. | `{"limits":{"cpu":"100m","memory":"256Mi"},"requests":{"cpu":"100m","memory":"256Mi"}}` | -| `fluentd.events.statefulset.podLabels` | Additional labels for fluentd events pods. | `{}` | -| `fluentd.events.statefulset.podAnnotations` | Additional annotations for fluentd events pods. | `{}` | -| `fluentd.events.statefulset.priorityClassName` | Priority class name for fluentd events pods. | `Nil` | -| `fluentd.events.statefulset.initContainers` | Define init containers that will be run for fluentd events statefulset. | `[]` | -| `fluentd.events.sourceName` | Source name for the Events source. 
Default: "events" | `Nil` | -| `fluentd.events.sourceCategory` | Source category for the Events source. Default: "{clusterName}/events" | `Nil` | -| `fluentd.events.overrideOutputConf` | Override output section for events. Leave empty for the default output section. | `Nil` | -| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling fluentd autoscaling unless you have an existing metrics-server in the cluster. | `false` | -| `metrics-server.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `metrics-server.args` | Arguments for metric server. | `["--kubelet-insecure-tls","--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]` | -| `fluent-bit.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `fluent-bit.resources` | Resources for Fluent-bit daemonsets. | `{}` | -| `fluent-bit.enabled` | Flag to control deploying Fluent-bit Helm sub-chart. | `true` | -| `fluent-bit.config.service` | Configure Fluent-bit Helm sub-chart service. | [fluent-bit.config.service in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L817-L827) | -| `fluent-bit.config.inputs` | Configure Fluent-bit Helm sub-chart inputs. Configuration for logs from different container runtimes is described in [Container log parsing](../../docs/ContainerLogs.md). | [fluent-bit.config.inputs in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L828-L895) | -| `fluent-bit.config.outputs` | Configure Fluent-bit Helm sub-chart outputs. | [fluent-bit.config.outputs in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L896-L906) | -| `fluent-bit.config.customParsers` | Configure Fluent-bit Helm sub-chart customParsers. 
| [fluent-bit.config.customParsers in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L907-L917) | -| `fluent-bit.service.labels` | Labels for fluent-bit service. | `{sumologic.com/scrape: "true"}` | -| `fluent-bit.podLabels` | Additional labels for fluent-bit pods. | `{}` | -| `fluent-bit.podAnnotations` | Additional annotations for fluent-bit pods. | `{}` | -| `fluent-bit.service.flush` | Frequency to flush fluent-bit buffer to fluentd. | `5` | -| `fluent-bit.metrics.enabled` | Enable metrics from fluent-bit. | `true` | -| `fluent-bit.env` | Environment variables for fluent-bit. | `[{"name":"FLUENTD_LOGS_SVC","valueFrom":{"configMapKeyRef":{"key":"fluentdLogs","name":"sumologic-configmap"}}},{"name":"NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | -| `fluent-bit.backend.type` | Set the backend to which Fluent-Bit should flush the information it gathers | `forward` | -| `fluent-bit.backend.forward.host` | Target host where Fluent-Bit or Fluentd are listening for Forward messages. | `${FLUENTD_LOGS_SVC}.${NAMESPACE}.svc.cluster.local.` | -| `fluent-bit.backend.forward.port` | TCP Port of the target service. | `24321` | -| `fluent-bit.backend.forward.tls` | Enable or disable TLS support. | `off` | -| `fluent-bit.backend.forward.tls_verify` | Force certificate validation. | `on` | -| `fluent-bit.backend.forward.tls_debug` | Set TLS debug verbosity level. It accept the following values: 0-4. | `1` | -| `fluent-bit.backend.forward.shared_key` | A key string known by the remote Fluentd used for authorization. | `Nil` | -| `fluent-bit.trackOffsets` | Specify whether to track the file offsets for tailing docker logs. This allows fluent-bit to pick up where it left after pod restarts but requires access to a hostPath. | `true` | -| `fluent-bit.tolerations` | Optional daemonset tolerations. 
| `[{"effect":"NoSchedule","operator":"Exists"}]` | -| `fluent-bit.input.systemd.enabled` | Enable systemd input. | `true` | -| `fluent-bit.parsers.enabled` | Enable custom parsers. | `true` | -| `fluent-bit.parsers.regex` | List of regex parsers. | `[{"name":"multi_line","regex":"(?\u003clog\u003e^{\"log\":\"\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}.*)"}]` | -| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. helm template). Changing this may break Sumo Logic apps. | `1.13.0-0` | -| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `true` | -| `kube-prometheus-stack.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `kube-prometheus-stack.alertmanager.enabled` | Deploy alertmanager. | `false` | -| `kube-prometheus-stack.grafana.enabled` | If true, deploy the grafana sub-chart. | `false` | -| `kube-prometheus-stack.grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar. | `false` | -| `kube-prometheus-stack.prometheusOperator.podLabels` | Additional labels for prometheus operator pods. | `{}` | -| `kube-prometheus-stack.prometheusOperator.podAnnotations` | Additional annotations for prometheus operator pods. | `{}` | -| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `false` | -| `kube-prometheus-stack.prometheusOperator.tls.enabled` | Enable TLS in prometheus operator. | `false` | -| `kube-prometheus-stack.kube-state-metrics.fullnameOverride` | Used to override the chart's full name. 
| `Nil` | -| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.kube-state-metrics.customLabels` | Custom labels to apply to service, deployment and pods. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.kube-state-metrics.podAnnotations` | Additional annotations for pods in the DaemonSet. Uses sub-chart defaults. | `{}` | -| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | `[{"additionalLabels":{"app":"collection-sumologic-fluentd-logs"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-fluentd-logs","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-fluentd-logs"}}},{"additionalLabels":{"app":"collection-sumologic-fluentd-metrics"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-fluentd-metrics","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-fluentd-metrics"}}},{"additionalLabels":{"app":"collection-sumologic-fluentd-events"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-fluentd-events","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-fluentd-events"}}},{"additionalLabels":{"app":"collection-fluent-bit"},"endpoints":[{"path":"/api/v1/metrics/prometheus","port":"metrics"}],"name":"collection-fluent-bit","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"fluent-bit"}}},{"additionalLabels":{"app":"collection-sumologic-otelcol"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-otelcol","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-otelcol"}}}]` | -| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. 
| `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.thanos.baseImage` | Base image for Thanos container. | `quay.io/thanos/thanos` | -| `kube-prometheus-stack.prometheus.prometheusSpec.thanos.version` | Image tag for Thanos container. | `v0.10.0` | -| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | `[{"env":[{"name":"FLUENTD_METRICS_SVC","valueFrom":{"configMapKeyRef":{"key":"fluentdMetrics","name":"sumologic-configmap"}}},{"name":"NAMESPACE","valueFrom":{"configMapKeyRef":{"key":"fluentdNamespace","name":"sumologic-configmap"}}}],"name":"prometheus-config-reloader"}]` | -| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` | -| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | See values.yaml | -| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` | -| `kube-prometheus-stack.prometheus-node-exporter.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `kube-prometheus-stack.prometheus-node-exporter.podLabels` | Additional labels for prometheus-node-exporter pods. | `{}` | -| `kube-prometheus-stack.prometheus-node-exporter.podAnnotations` | Additional annotations for prometheus-node-exporter pods. | `{}` | -| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{}` | -| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` | -| `falco.fullnameOverride` | Used to override the chart's full name. 
| `Nil` | -| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` | -| `falco.extraInitContainers` | InitContainers for Falco pod | `[{'name': 'init-falco', 'image': 'busybox', 'command': ['sh', '-c', 'while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] ; do\necho "waiting for kernel headers to be installed"\nsleep 3\ndone\n'], 'volumeMounts': [{'mountPath': '/host/usr', 'name': 'usr-fs', 'readOnly': True}, {'mountPath': '/host/etc', 'name': 'etc-fs', 'readOnly': True}]}]` | -| `falco.ebpf.enabled` | Enable eBPF support for Falco instead of falco-probe kernel module. Set to true for GKE. | `false` | -| `falco.falco.jsonOutput` | Output events in json. | `true` | -| `falco.pullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | -| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` | -| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | -| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | 1 | -| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` | -| `telegraf-operator.default` | Name of the default output configuration. | `sumologic-prometheus` | -| `telegraf-operator.data` | Telegraf sidecar configuration. 
| `{"sumologic-prometheus": "[[outputs.prometheus_client]]\\n ## Configuration details:\\n ## https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration\\n listen = ':9273'\\n metric_version = 2\\n"}` | -| `otelagent.enabled` | Enables OpenTelemetry Collector Agent mode DaemonSet. | `false` | -| `otelcol.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | -| `otelcol.deployment.resources.limits.memory` | Sets the OpenTelemetry Collector memory limit. | `2Gi` | -| `otelcol.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | -| `otelcol.metrics.enabled` | Enable or disable generation of the metrics from Collector. | `true` | -| `otelcol.config.service.pipelines.traces.receivers` | Sets the list of enabled receivers. | `{jaeger, opencensus, otlp, zipkin}` | -| `otelcol.config.exporters.zipkin.timeout` | Sets the Zipkin (default) exporter timeout. Append the unit, e.g. `s` when setting the parameter | `5s` | -| `otelcol.config.exporters.logging.loglevel` | When tracing debug logging exporter is enabled, sets the verbosity level. Use either `info` or `debug`. | `info` | -| `otelcol.config.service.pipelines.traces.exporters` | Sets the list of exporters enabled within OpenTelemetry Collector. Available values: `zipkin`, `logging`. Set to `{zipkin, logging}` to enable logging debugging exporter. | `{zipkin}` | -| `otelcol.config.service.pipelines.traces.processors` | Sets the list of enabled OpenTelemetry Collector processors. | `{memory_limiter, k8s_tagger, source, resource, batch, queued_retry}` | -| `otelcol.config.processors.memory_limiter.limit_mib` | Sets the OpenTelemetry Collector memory limitter plugin value (in MiB). Should be at least 100 Mib less than the value of `otelcol.deployment.resources.limits.memory`. | `1900` | -| `otelcol.config.processors.batch.send_batch_size` | Sets the preferred size of batch (in number of spans). 
| `256` | -| `otelcol.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch (in number of spans). Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | -| `otelcol.logLevelFilter` | Do not send otelcol logs if `true`. | `true` | -| `metadata.image.repository` | Image repository for otelcol docker container. | `public.ecr.aws/sumologic/sumologic-otel-collector` | -| `metadata.image.tag` | Image tag for otelcol docker container. | `0.0.18` | -| `metadata.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | -| `metadata.securityContext` | The securityContext configuration for otelcol. | `{"fsGroup": 999}` | -| `metadata.podLabels` | Additional labels for all otelcol pods. | `{}` | -| `metadata.podAnnotations` | Additional annotations for all otelcol pods. | `{}` | -| `metadata.serviceLabels` | Additional labels for all otelcol pods. | `{}` | -| `metadata.persistence.enabled` | Flag to control persistence for OpenTelemetry Collector. | `true` | -| `metadata.persistence.storageClass` | Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector. | `Nil` | -| `metadata.persistence.accessMode` | The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector. | `ReadWriteOnce` | -| `metadata.persistence.size` | Size of the volume which is used to provide persistence for OpenTelemetry Collector. | `10Gi` | -| `metadata.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` | -| `metadata.metrics.enabled` | Flag to control deploying the otelcol metrics statefulsets. | `true` | -| `metadata.metrics.logLevel` | Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `metadata.metrics.config` | Configuration for metrics otelcol. 
See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md. | [metadata.metrics.config in values.yaml](./values.yaml#L3085-L3385) | -| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{ periodSeconds: 3, failureThreshold: 60}` | -| `metadata.metrics.statefulset.nodeSelector` | Node selector for metrics metadata enrichment (otelcol) statefulset. | `{}` | -| `metadata.metrics.statefulset.tolerations` | Tolerations for metrics metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.metrics.statefulset.affinity` | Affinity for metrics metadata enrichment (otelcol) statefulset. | `{}` | -| `metadata.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset. | `soft` | -| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.metrics.statefulset.replicaCount` | Replica count for metrics metadata enrichment (otelcol) statefulset. | `3` | -| `metadata.metrics.statefulset.resources` | Resources for metrics metadata enrichment (otelcol) statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | -| `metadata.metrics.statefulset.priorityClassName` | Priority class name for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.statefulset.podLabels` | Additional labels for metrics metadata enrichment (otelcol) pods. | `{}` | -| `metadata.metrics.statefulset.podAnnotations` | Additional annotations for metrics metadata enrichment (otelcol) pods. | `{}` | -| `metadata.metrics.statefulset.containers.metadata.securityContext` | The securityContext configuration for otelcol container for metrics metadata enrichment statefulset. 
| `{}` | -| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.metrics.autoscaling.enabled` | Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `metadata.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `metadata.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `metadata.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | -| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` | -| `metadata.logs.enabled` | Flag to control deploying the otelcol logs statefulsets. | `true` | -| `metadata.logs.logLevel` | Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | -| `metadata.logs.config` | Configuration for logs otelcol. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md. | [metadata.metrics.config in values.yaml](./values.yaml#L3457-L3744) | -| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for logs otelcol container. 
| `{ periodSeconds: 3, failureThreshold: 60}` | -| `metadata.logs.statefulset.nodeSelector` | Node selector for logs metadata enrichment (otelcol) statefulset. | `{}` | -| `metadata.logs.statefulset.tolerations` | Tolerations for logs metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.logs.statefulset.affinity` | Affinity for logs metadata enrichment (otelcol) statefulset. | `{}` | -| `metadata.logs.statefulset.podAntiAffinity` | PodAntiAffinity for logs metadata enrichment (otelcol) statefulset. | `soft` | -| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` | -| `metadata.logs.statefulset.replicaCount` | Replica count for logs metadata enrichment (otelcol) statefulset. | `3` | -| `metadata.logs.statefulset.resources` | Resources for logs metadata enrichment (otelcol) statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | -| `metadata.logs.statefulset.priorityClassName` | Priority class name for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.podLabels` | Additional labels for logs metadata enrichment (otelcol) pods. | `{}` | -| `metadata.logs.statefulset.podAnnotations` | Additional annotations for logs metadata enrichment (otelcol) pods. | `{}` | -| `metadata.logs.statefulset.containers.metadata.securityContext` | The securityContext configuration for otelcol container for logs metadata enrichment statefulset. | `{}` | -| `metadata.logs.statefulset.extraEnvVars` | Additional environment variables for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.extraVolumes` | Additional volumes for logs metadata enrichment (otelcol) pods. | `Nil` | -| `metadata.logs.statefulset.extraVolumeMounts` | Additional volume mounts for logs metadata enrichment (otelcol) pods. 
| `Nil` | -| `metadata.logs.autoscaling.enabled` | Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | -| `metadata.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | -| `metadata.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | -| `metadata.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | -| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | -| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` | -| `tailing-sidecar-operator.enabled` | Flag to control deploying Tailing Sidecar Operator Helm sub-chart. | `false` | -| `tailing-sidecar-operator.fullnameOverride` | Used to override the chart's full name. 
| `Nil` | +| Parameter | Description | Default | +|---------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------| +| `nameOverride` | Used to override the Chart name. | `Nil` | +| `fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `sumologic.setupEnabled` | If enabled, a pre-install hook will create Collector and Sources in Sumo Logic. | `true` | +| `sumologic.cleanupEnabled` | If enabled, a pre-delete hook will destroy Kubernetes secret and Sumo Logic Collector. | `false` | +| `sumologic.logs.enabled` | Set the enabled flag to false for disabling logs ingestion altogether. | `true` | +| `sumologic.metrics.enabled` | Set the enabled flag to false for disabling metrics ingestion altogether. | `true` | +| `sumologic.logs.fields` | Fields to be created at Sumo Logic to ensure logs are tagged with relevant metadata. [Sumo Logic help](https://help.sumologic.com/Manage/Fields#Manage_fields) | `{}` | +| `sumologic.logs.metadata.provider` | Set provider to use for logs forwarding and metadata enrichment. Can be either otelcol or fluentd. | `fluentd` | +| `sumologic.metrics.metadata.provider` | Set provider to use for metrics forwarding and metadata enrichment. Can be either otelcol or fluentd. | `fluentd` | +| `sumologic.metrics.remoteWriteProxy.enabled` | Enable a load balancing proxy for Prometheus remote writes. [See docs/Best_Practices.md for more information.](../../docs/Best_Practices.md) | `false` | +| `sumologic.metrics.remoteWriteProxy.config.clientBodyBufferSize` | See the [nginx documentation](http://nginx.org/en/docs/http/ngx_http_core_module.html#client_body_buffer_size). Increase if you've also increased samples per send in Prometheus remote write. | See values.yaml | +| `sumologic.metrics.remoteWriteProxy.config.workerCountAutotune` | This feature autodetects how much CPU is assigned to the nginx instance and sets the right amount of workers based on that. Disable to use the default of 8 workers. | | +| `sumologic.metrics.remoteWriteProxy.replicaCount` | Number of replicas in the remote write proxy deployment. 
| See values.yaml | +| `sumologic.metrics.remoteWriteProxy.image` | Nginx docker image for the remote write proxy. | See values.yaml | +| `sumologic.metrics.remoteWriteProxy.resources` | Resource requests and limits for the remote write proxy container. | See values.yaml | +| `sumologic.metrics.remoteWriteProxy.livenessProbe` | Liveness probe settings for the remote write proxy container. | See values.yaml | +| `sumologic.metrics.remoteWriteProxy.readinessProbe` | Readiness probe settings for the remote write proxy container. | See values.yaml | +| `sumologic.metrics.remoteWriteProxy.securityContext` | The securityContext configuration for the remote write proxy. | `{}` | +| `sumologic.metrics.remoteWriteProxy.nodeSelector` | Node selector for the remote write proxy deployment. | `{}` | +| `sumologic.metrics.remoteWriteProxy.tolerations` | Tolerations for the remote write proxy deployment. | `[]` | +| `sumologic.metrics.remoteWriteProxy.affinity` | Affinity for the remote write proxy deployment. | `{}` | +| `sumologic.metrics.remoteWriteProxy.priorityClassName` | Priority class name for the remote write proxy deployment. | `Nil` | +| `sumologic.metrics.remoteWriteProxy.podLabels` | Additional labels for the remote write proxy container. | `{}` | +| `sumologic.metrics.remoteWriteProxy.podAnnotations` | Additional annotations for the remote write proxy container. | `{}` | +| `sumologic.traces.enabled` | Set the enabled flag to true to enable tracing ingestion. _Tracing must be enabled for the account first. Please contact your Sumo representative for activation details_ | `false` | +| `sumologic.envFromSecret` | If enabled, accessId and accessKey will be sourced from Secret Name given. Be sure to include at least the following env variables in your secret (1) SUMOLOGIC_ACCESSID, (2) SUMOLOGIC_ACCESSKEY | `sumo-api-secret` | +| `sumologic.accessId` | Sumo access ID. | `Nil` | +| `sumologic.accessKey` | Sumo access key. 
| `Nil` | +| `sumologic.endpoint` | Sumo API endpoint; Leave blank for automatic endpoint discovery and redirection. | `Nil` | +| `sumologic.collectionMonitoring` | If set to `false`, excludes all metrics which name matches `/^up\ |^prometheus_remote_storage_.*\|^fluentd_.*\|^fluentbit.*\|^otelcol.*$/` regexp and excludes all container logs and traces coming from collection namespace. | `false` | +| `sumologic.collectorName` | The name of the Sumo Logic collector that will be created in the SetUp job. Defaults to `clusterName` if not specified. | `Nil` | +| `sumologic.clusterName` | An identifier for the Kubernetes cluster. Whitespaces in the cluster name will be replaced with dashes. | `kubernetes` | +| `sumologic.collector.sources` | Configuration of HTTP sources. [See docs/Terraform.md for more information](../../docs/Terraform.md). | [sumologic.collector.sources in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L114-L164) | +| `sumologic.httpProxy` | HTTP proxy URL | `Nil` | +| `sumologic.httpsProxy` | HTTPS proxy URL | `Nil` | +| `sumologic.noProxy` | List of comma separated hostnames which should be excluded from the proxy | `kubernetes.default.svc` | +| `sumologic.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's deployments and statefulsets. | `Nil` | +| `sumologic.podLabels` | Additional labels for the pods. | `{}` | +| `sumologic.podAnnotations` | Additional annotations for the pods. | `{}` | +| `sumologic.scc.create` | Create OpenShift's Security Context Constraint | `false` | +| `sumologic.serviceAccount.annotations` | Add custom annotations to sumologic serviceAccounts | `{}` | +| `sumologic.setup.job.pullSecrets` | Optional list of secrets that will be used for pulling images for Sumo Logic's setup job. | `Nil` | +| `sumologic.setup.job.podLabels` | Additional labels for the setup Job pod. 
| `{}` | +| `sumologic.setup.job.podAnnotations` | Additional annotations for the setup Job pod. | `{}` | +| `sumologic.setup.job.image.repository` | Image repository for Sumo Logic setup job docker container. | `sumologic/kubernetes-fluentd` | +| `sumologic.setup.job.image.tag` | Image tag for Sumo Logic setup job docker container. | `1.3.0` | +| `sumologic.setup.job.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | +| `sumologic.setup.monitors.enabled` | If enabled, a pre-install hook will create k8s monitors in Sumo Logic. | `true` | +| `sumologic.setup.monitors.monitorStatus` | The installed monitors default status: enabled/disabled. | `enabled` | +| `sumologic.setup.monitors.notificationEmails` | A list of emails to send notifications from monitors. | `[]` | +| `sumologic.setup.dashboards.enabled` | If enabled, a pre-install hook will install k8s dashboards in Sumo Logic. | `true` | +| `fluentd.image.repository` | Image repository for Sumo Logic docker container. | `sumologic/kubernetes-fluentd` | +| `fluentd.image.tag` | Image tag for Sumo Logic docker container. | `1.3.0` | +| `fluentd.image.pullPolicy` | Image pullPolicy for Sumo Logic docker container. | `IfNotPresent` | +| `fluentd.logLevelFilter` | Do not send fluentd logs if set to `true`. | `true` | +| `fluentd.additionalPlugins` | Additional Fluentd plugins to install from RubyGems. Please see our [documentation](../../docs/Additional_Fluentd_Plugins.md) for more information. | `[]` | +| `fluentd.compression.enabled` | Flag to control if data is sent to Sumo Logic compressed or not | `true` | +| `fluentd.compression.encoding` | Specifies which encoding should be used to compress data (either `gzip` or `deflate`) | `gzip` | +| `fluentd.logLevel` | Sets the fluentd log level. The default log level, if not specified, is info. Sumo will only ingest the error log level and some specific warnings, the info logs can be seen in kubectl logs. 
| `info` | +| `fluentd.verifySsl` | Verify SumoLogic HTTPS certificates. | `true` | +| `fluentd.proxyUri` | Proxy URI for sumologic output plugin. | `Nil` | +| `fluentd.securityContext` | The securityContext configuration for Fluentd | `{"fsGroup":999}` | +| `fluentd.podLabels` | Additional labels for all fluentd pods | `{}` | +| `fluentd.pvcLabels` | Additional labels for all fluentd PVCs | `{}` | +| `fluentd.podAnnotations` | Additional annotations for all fluentd pods | `{}` | +| `fluentd.podSecurityPolicy.create` | If true, create & use `podSecurityPolicy` for fluentd resources | `false` | +| `fluentd.persistence.enabled` | Persist data to a persistent volume; When enabled, fluentd uses the file buffer instead of memory buffer. After changing this value follow steps described in [Fluentd Persistence](../../docs/FluentdPersistence.md). | `true` | +| `fluentd.persistence.storageClass` | If defined, `storageClassName: <storageClass>`. If set to "-", storageClassName: "", which disables dynamic provisioning. If undefined (the default) or set to null, no storageClassName spec is set, choosing the default provisioner. (gp2 on AWS, standard on GKE, Azure & OpenStack) | `Nil` | +| `fluentd.persistence.accessMode` | The accessMode for persistence. | `ReadWriteOnce` | +| `fluentd.persistence.size` | The size needed for persistence. | `10Gi` | +| `fluentd.buffer.type` | Option to specify the Fluentd buffer as file/memory. If `fluentd.persistence.enabled` is `true`, this will be ignored. | `memory` | +| `fluentd.buffer.flushInterval` | How frequently to push logs to Sumo Logic. | `5s` | +| `fluentd.buffer.numThreads` | Increase number of http threads to Sumo. May be required in heavy logging/high DPM clusters. | `8` | +| `fluentd.buffer.chunkLimitSize` | The max size of each chunks: events will be written into chunks until the size of chunks become this size. | `1m` | +| `fluentd.buffer.queueChunkLimitSize` | Limit the number of queued chunks. 
| `128` | +| `fluentd.buffer.totalLimitSize` | The size limitation of this buffer plugin instance. | `128m` | +| `fluentd.buffer.filePaths` | File paths to buffer to, if Fluentd buffer type is specified as file above. Each sumologic output plugin buffers to its own unique file. | `{"events":"/fluentd/buffer/events","logs":{"containers":"/fluentd/buffer/logs.containers","default":"/fluentd/buffer/logs.default","kubelet":"/fluentd/buffer/logs.kubelet","systemd":"/fluentd/buffer/logs.systemd"},"metrics":{"apiserver":"/fluentd/buffer/metrics.apiserver","container":"/fluentd/buffer/metrics.container","controller":"/fluentd/buffer/metrics.controller","default":"/fluentd/buffer/metrics.default","kubelet":"/fluentd/buffer/metrics.kubelet","node":"/fluentd/buffer/metrics.node","scheduler":"/fluentd/buffer/metrics.scheduler","state":"/fluentd/buffer/metrics.state"},"traces":"/fluentd/buffer/traces"}` | +| `fluentd.buffer.extraConf` | Additional config for buffer settings | `Nil` | +| `fluentd.metadata.addOwners` | Option to control the enrichment of logs and metrics with pod owner metadata like `daemonset`, `deployment`, `replicaset`, `statefulset`. | `true` | +| `fluentd.metadata.addService` | Option to control the enrichment of logs and metrics with `service` metadata. | `true` | +| `fluentd.metadata.annotation_match` | Option to control capturing of annotations by metadata filter plugin. | `['sumologic\.com.*']` | +| `fluentd.metadata.apiGroups` | List of supported kubernetes API groups. | `['apps/v1']` | +| `fluentd.metadata.apiServerUrl` | Option to specify custom API server URL instead of the default, that is taken from KUBERNETES_SERVICE_HOST and KUBERNETES_SERVICE_PORT environment variables. Example: `"https://kubernetes.default.svc:443"`. | `""` | +| `fluentd.metadata.coreApiVersions` | List of supported kubernetes API versions. | `['v1']` | +| `fluentd.metadata.cacheSize` | Option to control the enabling of metadata filter plugin cache_size. 
| `10000` | +| `fluentd.metadata.cacheTtl` | Option to control the enabling of metadata filter plugin cache_ttl (in seconds). | `7200` | +| `fluentd.metadata.cacheRefresh` | Option to control the interval at which metadata cache is asynchronously refreshed (in seconds). | `3600` | +| `fluentd.metadata.cacheRefreshVariation` | Option to control the variation in seconds by which the cacheRefresh option is changed for each pod separately. For example, if cache refresh is 1 hour and variation is 15 minutes, then actual cache refresh interval will be a random value between 45 minutes and 1 hour 15 minutes, different for each pod. This helps spread the load on API server that the cache refresh induces. Setting this to 0 disables cache refresh variation. | `900` | +| `fluentd.metadata.cacheRefreshApiserverRequestDelay` | Option to control the delay with which cache refresh calls hit the api server. For example, if 0 then all metadata enrichment happens immediately. Setting this to a non-zero value ensures the traffic to api server is more distributed. | `0` | +| `fluentd.metadata.cacheRefreshExcludePodRegex` | Option to add regex for selectively disabling refresh for metadata in fluentd cache. For example, if regex is `(command-[a-z0-9]*)` then all pods starting with name `command` will not have their metadata refreshed and will be cleaned up from cache | `''` | +| `fluentd.metadata.pluginLogLevel` | Option to give plugin specific log level. | `error` | +| `fluentd.logs.enabled` | Flag to control deploying the Fluentd logs statefulsets. | `true` | +| `fluentd.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment statefulset. | `{"minAvailable": 2}` | +| `fluentd.logs.statefulset.nodeSelector` | Node selector for Fluentd log statefulset. | `{}` | +| `fluentd.logs.statefulset.tolerations` | Tolerations for Fluentd log statefulset. | `[]` | +| `fluentd.logs.statefulset.affinity` | Affinity for Fluentd log statefulset. 
| `{}` | +| `fluentd.logs.statefulset.podAntiAffinity` | PodAntiAffinity for Fluentd log statefulset. | `soft` | +| `fluentd.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for Fluentd logs metadata enrichment statefulset. | `[]` | +| `fluentd.logs.statefulset.replicaCount` | Replica count for Fluentd log statefulset. | `3` | +| `fluentd.logs.statefulset.resources` | Resources for Fluentd log statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | +| `fluentd.logs.statefulset.podLabels` | Additional labels for fluentd log pods. | `{}` | +| `fluentd.logs.statefulset.podAnnotations` | Additional annotations for fluentd log pods. | `{}` | +| `fluentd.logs.statefulset.priorityClassName` | Priority class name for fluentd log pods. | `Nil` | +| `fluentd.logs.statefulset.initContainers` | Define init containers that will be run for fluentd logs statefulset. | `[]` | +| `fluentd.logs.autoscaling.enabled` | Option to turn autoscaling on for fluentd and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `fluentd.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `fluentd.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` | +| `fluentd.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | +| `fluentd.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | +| `fluentd.logs.rawConfig` | Default log configuration. | `@include common.conf @include logs.conf` | +| `fluentd.logs.output.logFormat` | Format to post logs into Sumo: fields, json, json_merge, or text. | `fields` | +| `fluentd.logs.output.addTimestamp` | Option to control adding timestamp to logs. | `true` | +| `fluentd.logs.output.timestampKey` | Field name when add_timestamp is on. 
| `timestamp` | +| `fluentd.logs.output.pluginLogLevel` | Option to give plugin specific log level. | `error` | +| `fluentd.logs.output.extraConf` | Additional config parameters for sumologic output plugin | `Nil` | +| `fluentd.logs.extraLogs` | Additional config for custom log pipelines. | `Nil` | +| `fluentd.logs.containers.overrideRawConfig` | To override the entire contents of logs.source.containers.conf file. Leave empty for the default pipeline. | `Nil` | +| `fluentd.logs.containers.outputConf` | Default output configuration for container logs. | `@include logs.output.conf` | +| `fluentd.logs.containers.overrideOutputConf` | Override output section for container logs. Leave empty for the default output section. | `Nil` | +| `fluentd.logs.containers.sourceName` | Set the _sourceName metadata field in Sumo Logic. | `%{namespace}.%{pod}.%{container}` | +| `fluentd.logs.containers.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `%{namespace}/%{pod_name}` | +| `fluentd.logs.containers.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` | +| `fluentd.logs.containers.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `fluentd.logs.containers.excludeContainerRegex` | A regular expression for containers. Matching containers will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.containers.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.containers.excludeNamespaceRegex` | A regular expression for namespaces. Matching namespaces will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.containers.excludePodRegex` | A regular expression for pods. Matching pods will be excluded from Sumo. The logs will still be sent to FluentD. 
| `Nil` | +| `fluentd.logs.containers.k8sMetadataFilter.watch` | Option to control the enabling of metadata filter plugin watch. | `true` | +| `fluentd.logs.containers.k8sMetadataFilter.caFile` | Path to CA file for Kubernetes server certificate validation. | `Nil` | +| `fluentd.logs.containers.k8sMetadataFilter.verifySsl` | Validate SSL certificates. | `true` | +| `fluentd.logs.containers.k8sMetadataFilter.clientCert` | Path to a client cert file to authenticate to the API server. | `Nil` | +| `fluentd.logs.containers.k8sMetadataFilter.clientKey` | Path to a client key file to authenticate to the API server. | `Nil` | +| `fluentd.logs.containers.k8sMetadataFilter.bearerTokenFile` | Path to a file containing the bearer token to use for authentication. | `Nil` | +| `fluentd.logs.containers.k8sMetadataFilter.tagToMetadataRegexp` | The regular expression used to extract kubernetes metadata (pod name, container name, namespace) from the current fluentd tag. | `.+?\.containers\.(?<pod_name>[^_]+)_(?<namespace>[^_]+)_(?<container_name>.+)-(?<docker_id>[a-z0-9]{64})\.log$` | +| `fluentd.logs.containers.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | +| `fluentd.logs.containers.extraOutputPluginConf` | To use additional output plugins. | `Nil` | +| `fluentd.logs.containers.perContainerAnnotationsEnabled` | Enable container-level pod annotations. See [fluent-plugin-kubernetes-sumologic documentation](https://github.com/SumoLogic/sumologic-kubernetes-fluentd/tree/v1.12.2-sumo-6/fluent-plugin-kubernetes-sumologic#container-level-pod-annotations_) for more details. | `false` | +| `fluentd.logs.input.forwardExtraConf` | Configuration for the forward input plugin that receives logs from FluentBit. | `` | +| `fluentd.logs.kubelet.enabled` | Collect kubelet logs. | `true` | +| `fluentd.logs.kubelet.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | +| `fluentd.logs.kubelet.extraOutputPluginConf` | To use additional output plugins. 
| `Nil` | +| `fluentd.logs.kubelet.outputConf` | Output configuration for kubelet. | `@include logs.output.conf` | +| `fluentd.logs.kubelet.overrideOutputConf` | Override output section for kubelet logs. Leave empty for the default output section. | `Nil` | +| `fluentd.logs.kubelet.sourceName` | Set the _sourceName metadata field in Sumo Logic. | `k8s_kubelet` | +| `fluentd.logs.kubelet.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. | `kubelet` | +| `fluentd.logs.kubelet.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` | +| `fluentd.logs.kubelet.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `fluentd.logs.kubelet.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.kubelet.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.kubelet.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.kubelet.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.systemd.enabled` | Collect systemd logs. | `true` | +| `fluentd.logs.systemd.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | +| `fluentd.logs.systemd.extraOutputPluginConf` | To use additional output plugins. | `Nil` | +| `fluentd.logs.systemd.outputConf` | Output configuration for systemd. | `@include logs.output.conf` | +| `fluentd.logs.systemd.overrideOutputConf` | Override output section for systemd logs. Leave empty for the default output section. | `Nil` | +| `fluentd.logs.systemd.sourceCategory` | Set the _sourceCategory metadata field in Sumo Logic. 
| `system` | +| `fluentd.logs.systemd.sourceCategoryPrefix` | Set the prefix, for _sourceCategory metadata. | `kubernetes/` | +| `fluentd.logs.systemd.sourceCategoryReplaceDash` | Used to replace - with another character. | `/` | +| `fluentd.logs.systemd.excludeFacilityRegex` | A regular expression for facility. Matching facility will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.systemd.excludeHostRegex` | A regular expression for hosts. Matching hosts will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.systemd.excludePriorityRegex` | A regular expression for priority. Matching priority will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.systemd.excludeUnitRegex` | A regular expression for unit. Matching unit will be excluded from Sumo. The logs will still be sent to FluentD. | `Nil` | +| `fluentd.logs.default.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | +| `fluentd.logs.default.extraOutputPluginConf` | To use additional output plugins. | `Nil` | +| `fluentd.logs.default.outputConf` | Default log configuration (catch-all). | `@include logs.output.conf` | +| `fluentd.logs.default.overrideOutputConf` | Override output section for untagged logs. Leave empty for the default output section. | `Nil` | +| `fluentd.metrics.enabled` | Flag to control deploying the Fluentd metrics statefulsets. | `true` | +| `fluentd.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment. statefulset. | `{"minAvailable": 2}` | +| `fluentd.metrics.statefulset.nodeSelector` | Node selector for Fluentd metrics statefulset. | `{}` | +| `fluentd.metrics.statefulset.tolerations` | Tolerations for Fluentd metrics statefulset. | `[]` | +| `fluentd.metrics.statefulset.affinity` | Affinity for Fluentd metrics statefulset. 
| `{}` | +| `fluentd.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for Fluentd metrics statefulset. | `soft` | +| `fluentd.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for Fluentd metrics metadata enrichment statefulset. | `[]` | +| `fluentd.metrics.statefulset.replicaCount` | Replica count for Fluentd metrics statefulset. | `3` | +| `fluentd.metrics.statefulset.resources` | Resources for Fluentd metrics statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | +| `fluentd.metrics.statefulset.podLabels` | Additional labels for fluentd metrics pods. | `{}` | +| `fluentd.metrics.statefulset.podAnnotations` | Additional annotations for fluentd metrics pods. | `{}` | +| `fluentd.metrics.statefulset.priorityClassName` | Priority class name for fluentd metrics pods. | `Nil` | +| `fluentd.metrics.statefulset.initContainers` | Define init containers that will be run for fluentd metrics statefulset. | `[]` | +| `fluentd.metrics.autoscaling.enabled` | Option to turn autoscaling on for fluentd and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `fluentd.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `fluentd.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling. | `10` | +| `fluentd.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | +| `fluentd.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | +| `fluentd.metrics.rawConfig` | Raw config for fluentd metrics. | `@include common.conf @include metrics.conf` | +| `fluentd.metrics.outputConf` | Configuration for sumologic output plugin. | `@include metrics.output.conf` | +| `fluentd.metrics.extraEnvVars` | Additional environment variables for metrics metadata enrichment pods. 
| `Nil` | +| `fluentd.metrics.extraVolumes` | Additional volumes for metrics metadata enrichment pods. | `Nil` | +| `fluentd.metrics.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment pods. | `Nil` | +| `fluentd.metrics.extraOutputConf` | Additional config parameters for sumologic output plugin | `Nil` | +| `fluentd.metrics.extraFilterPluginConf` | To use additional filter plugins. | `Nil` | +| `fluentd.metrics.extraOutputPluginConf` | To use additional output plugins. | `Nil` | +| `fluentd.metrics.overrideOutputConf` | Override output section for metrics. Leave empty for the default output section. | `Nil` | +| `fluentd.monitoring` | Configuration of fluentd monitoring metrics. Adds the `fluentd_input_status_num_records_total` metric for input and the `fluentd_output_status_num_records_total` metric for output. | `{"input": false, "output": false}` | +| `fluentd.events.enabled` | If enabled, collect K8s events. | `true` | +| `fluentd.events.statefulset.nodeSelector` | Node selector for Fluentd events statefulset. | `{}` | +| `fluentd.events.statefulset.affinity` | Affinity for Fluentd events statefulset. | `{}` | +| `fluentd.events.statefulset.tolerations` | Tolerations for Fluentd events statefulset. | `[]` | +| `fluentd.events.statefulset.resources` | Resources for Fluentd log statefulset. | `{"limits":{"cpu":"100m","memory":"256Mi"},"requests":{"cpu":"100m","memory":"256Mi"}}` | +| `fluentd.events.statefulset.podLabels` | Additional labels for fluentd events pods. | `{}` | +| `fluentd.events.statefulset.podAnnotations` | Additional annotations for fluentd events pods. | `{}` | +| `fluentd.events.statefulset.priorityClassName` | Priority class name for fluentd events pods. | `Nil` | +| `fluentd.events.statefulset.initContainers` | Define init containers that will be run for fluentd events statefulset. | `[]` | +| `fluentd.events.sourceName` | Source name for the Events source. 
Default: "events" | `Nil` | +| `fluentd.events.sourceCategory` | Source category for the Events source. Default: "{clusterName}/events" | `Nil` | +| `fluentd.events.overrideOutputConf` | Override output section for events. Leave empty for the default output section. | `Nil` | +| `metrics-server.enabled` | Set the enabled flag to true for enabling metrics-server. This is required before enabling fluentd autoscaling unless you have an existing metrics-server in the cluster. | `false` | +| `metrics-server.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `metrics-server.args` | Arguments for metric server. | `["--kubelet-insecure-tls","--kubelet-preferred-address-types=InternalIP,ExternalIP,Hostname"]` | +| `fluent-bit.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `fluent-bit.resources` | Resources for Fluent-bit daemonsets. | `{}` | +| `fluent-bit.enabled` | Flag to control deploying Fluent-bit Helm sub-chart. | `true` | +| `fluent-bit.config.service` | Configure Fluent-bit Helm sub-chart service. | [fluent-bit.config.service in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L817-L827) | +| `fluent-bit.config.inputs` | Configure Fluent-bit Helm sub-chart inputs. Configuration for logs from different container runtimes is described in [Container log parsing](../../docs/ContainerLogs.md). | [fluent-bit.config.inputs in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L828-L895) | +| `fluent-bit.config.outputs` | Configure Fluent-bit Helm sub-chart outputs. | [fluent-bit.config.outputs in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L896-L906) | +| `fluent-bit.config.customParsers` | Configure Fluent-bit Helm sub-chart customParsers. 
| [fluent-bit.config.customParsers in values.yaml](https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/v2.0.0/deploy/helm/sumologic/values.yaml#L907-L917) | +| `fluent-bit.service.labels` | Labels for fluent-bit service. | `{sumologic.com/scrape: "true"}` | +| `fluent-bit.podLabels` | Additional labels for fluent-bit pods. | `{}` | +| `fluent-bit.podAnnotations` | Additional annotations for fluent-bit pods. | `{}` | +| `fluent-bit.service.flush` | Frequency to flush fluent-bit buffer to fluentd. | `5` | +| `fluent-bit.metrics.enabled` | Enable metrics from fluent-bit. | `true` | +| `fluent-bit.env` | Environment variables for fluent-bit. | `[{"name":"FLUENTD_LOGS_SVC","valueFrom":{"configMapKeyRef":{"key":"fluentdLogs","name":"sumologic-configmap"}}},{"name":"NAMESPACE","valueFrom":{"fieldRef":{"fieldPath":"metadata.namespace"}}}]` | +| `fluent-bit.backend.type` | Set the backend to which Fluent-Bit should flush the information it gathers | `forward` | +| `fluent-bit.backend.forward.host` | Target host where Fluent-Bit or Fluentd are listening for Forward messages. | `${FLUENTD_LOGS_SVC}.${NAMESPACE}.svc.cluster.local.` | +| `fluent-bit.backend.forward.port` | TCP Port of the target service. | `24321` | +| `fluent-bit.backend.forward.tls` | Enable or disable TLS support. | `off` | +| `fluent-bit.backend.forward.tls_verify` | Force certificate validation. | `on` | +| `fluent-bit.backend.forward.tls_debug` | Set TLS debug verbosity level. It accept the following values: 0-4. | `1` | +| `fluent-bit.backend.forward.shared_key` | A key string known by the remote Fluentd used for authorization. | `Nil` | +| `fluent-bit.trackOffsets` | Specify whether to track the file offsets for tailing docker logs. This allows fluent-bit to pick up where it left after pod restarts but requires access to a hostPath. | `true` | +| `fluent-bit.tolerations` | Optional daemonset tolerations. 
| `[{"effect":"NoSchedule","operator":"Exists"}]` | +| `fluent-bit.input.systemd.enabled` | Enable systemd input. | `true` | +| `fluent-bit.parsers.enabled` | Enable custom parsers. | `true` | +| `fluent-bit.parsers.regex` | List of regex parsers. | `[{"name":"multi_line","regex":"(?\u003clog\u003e^{\"log\":\"\\d{4}-\\d{1,2}-\\d{1,2}.\\d{2}:\\d{2}:\\d{2}.*)"}]` | +| `kube-prometheus-stack.kubeTargetVersionOverride` | Provide a target gitVersion of K8S, in case .Capabilites.KubeVersion is not available (e.g. helm template). Changing this may break Sumo Logic apps. | `1.13.0-0` | +| `kube-prometheus-stack.enabled` | Flag to control deploying Prometheus Operator Helm sub-chart. | `true` | +| `kube-prometheus-stack.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `kube-prometheus-stack.alertmanager.enabled` | Deploy alertmanager. | `false` | +| `kube-prometheus-stack.grafana.enabled` | If true, deploy the grafana sub-chart. | `false` | +| `kube-prometheus-stack.grafana.defaultDashboardsEnabled` | Deploy default dashboards. These are loaded using the sidecar. | `false` | +| `kube-prometheus-stack.prometheusOperator.podLabels` | Additional labels for prometheus operator pods. | `{}` | +| `kube-prometheus-stack.prometheusOperator.podAnnotations` | Additional annotations for prometheus operator pods. | `{}` | +| `kube-prometheus-stack.prometheusOperator.resources` | Resource limits for prometheus operator. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.prometheusOperator.admissionWebhooks.enabled` | Create PrometheusRules admission webhooks. Mutating webhook will patch PrometheusRules objects indicating they were validated. Validating webhook will check the rules syntax. | `false` | +| `kube-prometheus-stack.prometheusOperator.tls.enabled` | Enable TLS in prometheus operator. | `false` | +| `kube-prometheus-stack.kube-state-metrics.fullnameOverride` | Used to override the chart's full name. 
| `Nil` | +| `kube-prometheus-stack.kube-state-metrics.resources` | Resource limits for kube state metrics. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.kube-state-metrics.customLabels` | Custom labels to apply to service, deployment and pods. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.kube-state-metrics.podAnnotations` | Additional annotations for pods in the DaemonSet. Uses sub-chart defaults. | `{}` | +| `kube-prometheus-stack.prometheus.additionalServiceMonitors` | List of ServiceMonitor objects to create. | `[{"additionalLabels":{"app":"collection-sumologic-fluentd-logs"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-fluentd-logs","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-fluentd-logs"}}},{"additionalLabels":{"app":"collection-sumologic-fluentd-metrics"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-fluentd-metrics","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-fluentd-metrics"}}},{"additionalLabels":{"app":"collection-sumologic-fluentd-events"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-fluentd-events","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-fluentd-events"}}},{"additionalLabels":{"app":"collection-fluent-bit"},"endpoints":[{"path":"/api/v1/metrics/prometheus","port":"metrics"}],"name":"collection-fluent-bit","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"fluent-bit"}}},{"additionalLabels":{"app":"collection-sumologic-otelcol"},"endpoints":[{"port":"metrics"}],"name":"collection-sumologic-otelcol","namespaceSelector":{"matchNames":["sumologic"]},"selector":{"matchLabels":{"app":"collection-sumologic-otelcol"}}}]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.resources` | Resource limits for prometheus. Uses sub-chart defaults. 
| `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.thanos.baseImage` | Base image for Thanos container. | `quay.io/thanos/thanos` | +| `kube-prometheus-stack.prometheus.prometheusSpec.thanos.version` | Image tag for Thanos container. | `v0.10.0` | +| `kube-prometheus-stack.prometheus.prometheusSpec.containers` | Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod. | `[{"env":[{"name":"FLUENTD_METRICS_SVC","valueFrom":{"configMapKeyRef":{"key":"fluentdMetrics","name":"sumologic-configmap"}}},{"name":"NAMESPACE","valueFrom":{"configMapKeyRef":{"key":"fluentdNamespace","name":"sumologic-configmap"}}}],"name":"prometheus-config-reloader"}]` | +| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.labels` | Add custom pod labels to prometheus pods | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.podMetadata.annotations` | Add custom pod annotations to prometheus pods | `{}` | +| `kube-prometheus-stack.prometheus.prometheusSpec.remoteWrite` | If specified, the remote_write spec. | See values.yaml | +| `kube-prometheus-stack.prometheus.prometheusSpec.walCompression` | Enables walCompression in Prometheus | `true` | +| `kube-prometheus-stack.prometheus-node-exporter.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `kube-prometheus-stack.prometheus-node-exporter.podLabels` | Additional labels for prometheus-node-exporter pods. | `{}` | +| `kube-prometheus-stack.prometheus-node-exporter.podAnnotations` | Additional annotations for prometheus-node-exporter pods. | `{}` | +| `kube-prometheus-stack.prometheus-node-exporter.resources` | Resource limits for node exporter. Uses sub-chart defaults. | `{}` | +| `falco.enabled` | Flag to control deploying Falco Helm sub-chart. | `false` | +| `falco.fullnameOverride` | Used to override the chart's full name. 
| `Nil` | +| `falco.addKernelDevel` | Flag to control installation of `kernel-devel` on nodes using MachineConfig, required to build falco modules (only for OpenShift) | `true` | +| `falco.extraInitContainers` | InitContainers for Falco pod | `[{'name': 'init-falco', 'image': 'busybox', 'command': ['sh', '-c', 'while [ -f /host/etc/redhat-release ] && [ -z "$(ls /host/usr/src/kernels)" ] ; do\necho "waiting for kernel headers to be installed"\nsleep 3\ndone\n'], 'volumeMounts': [{'mountPath': '/host/usr', 'name': 'usr-fs', 'readOnly': True}, {'mountPath': '/host/etc', 'name': 'etc-fs', 'readOnly': True}]}]` | +| `falco.ebpf.enabled` | Enable eBPF support for Falco instead of falco-probe kernel module. Set to true for GKE. | `false` | +| `falco.falco.jsonOutput` | Output events in json. | `true` | +| `falco.pullSecrets` | Pull secrets for falco images. For more information on using Kubernetes secrets with container registries please refer to [Creating a Secret with a Docker config at kubernetes.io](https://kubernetes.io/docs/concepts/containers/images/#creating-a-secret-with-a-docker-config). | `[]` | +| `telegraf-operator.enabled` | Flag to control deploying Telegraf Operator Helm sub-chart. | `false` | +| `telegraf-operator.fullnameOverride` | Used to override the chart's full name. | `Nil` | +| `telegraf-operator.replicaCount` | Replica count for Telegraf Operator pods. | 1 | +| `telegraf-operator.classes.secretName` | Secret name in which the Telegraf Operator configuration will be stored. | `telegraf-operator-classes` | +| `telegraf-operator.default` | Name of the default output configuration. | `sumologic-prometheus` | +| `telegraf-operator.data` | Telegraf sidecar configuration. 
| `{"sumologic-prometheus": "[[outputs.prometheus_client]]\\n ## Configuration details:\\n ## https://github.com/influxdata/telegraf/tree/master/plugins/outputs/prometheus_client#configuration\\n listen = ':9273'\\n metric_version = 2\\n"}` | +| `otelagent.enabled` | Enables OpenTelemetry Collector Agent mode DaemonSet. | `false` | +| `otelcol.deployment.replicas` | Set the number of OpenTelemetry Collector replicas. | `1` | +| `otelcol.deployment.resources.limits.memory` | Sets the OpenTelemetry Collector memory limit. | `2Gi` | +| `otelcol.deployment.priorityClassName` | Priority class name for OpenTelemetry Collector log pods. | `Nil` | +| `otelcol.metrics.enabled` | Enable or disable generation of the metrics from Collector. | `true` | +| `otelcol.config.service.pipelines.traces.receivers` | Sets the list of enabled receivers. | `{jaeger, opencensus, otlp, zipkin}` | +| `otelcol.config.exporters.zipkin.timeout` | Sets the Zipkin (default) exporter timeout. Append the unit, e.g. `s` when setting the parameter | `5s` | +| `otelcol.config.exporters.logging.loglevel` | When tracing debug logging exporter is enabled, sets the verbosity level. Use either `info` or `debug`. | `info` | +| `otelcol.config.service.pipelines.traces.exporters` | Sets the list of exporters enabled within OpenTelemetry Collector. Available values: `zipkin`, `logging`. Set to `{zipkin, logging}` to enable logging debugging exporter. | `{zipkin}` | +| `otelcol.config.service.pipelines.traces.processors` | Sets the list of enabled OpenTelemetry Collector processors. | `{memory_limiter, k8s_tagger, source, resource, batch, queued_retry}` | +| `otelcol.config.processors.memory_limiter.limit_mib` | Sets the OpenTelemetry Collector memory limiter plugin value (in MiB). Should be at least 100 MiB less than the value of `otelcol.deployment.resources.limits.memory`. | `1900` | +| `otelcol.config.processors.batch.send_batch_size` | Sets the preferred size of batch (in number of spans). 
| `256` | +| `otelcol.config.processors.batch.send_batch_max_size` | Sets the maximum allowed size of a batch (in number of spans). Use with caution, setting too large value might cause 413 Payload Too Large errors. | `512` | +| `otelcol.logLevelFilter` | Do not send otelcol logs if `true`. | `true` | +| `metadata.image.repository` | Image repository for otelcol docker container. | `public.ecr.aws/sumologic/sumologic-otel-collector` | +| `metadata.image.tag` | Image tag for otelcol docker container. | `0.0.18` | +| `metadata.image.pullPolicy` | Image pullPolicy for otelcol docker container. | `IfNotPresent` | +| `metadata.securityContext` | The securityContext configuration for otelcol. | `{"fsGroup": 999}` | +| `metadata.podLabels` | Additional labels for all otelcol pods. | `{}` | +| `metadata.podAnnotations` | Additional annotations for all otelcol pods. | `{}` | +| `metadata.serviceLabels` | Additional labels for all otelcol pods. | `{}` | +| `metadata.persistence.enabled` | Flag to control persistence for OpenTelemetry Collector. | `true` | +| `metadata.persistence.storageClass` | Defines storageClassName for the PersistentVolumeClaim which is used to provide persistence for OpenTelemetry Collector. | `Nil` | +| `metadata.persistence.accessMode` | The accessMode for the volume which is used to provide persistence for OpenTelemetry Collector. | `ReadWriteOnce` | +| `metadata.persistence.size` | Size of the volume which is used to provide persistence for OpenTelemetry Collector. | `10Gi` | +| `metadata.persistence.pvcLabels` | Additional PersistentVolumeClaim labels for all OpenTelemetry Collector pods. | `{}` | +| `metadata.metrics.enabled` | Flag to control deploying the otelcol metrics statefulsets. | `true` | +| `metadata.metrics.logLevel` | Flag to control logging level for OpenTelemetry Collector for metrics. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | +| `metadata.metrics.config` | Configuration for metrics otelcol. 
See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md. | [metadata.metrics.config in values.yaml](./values.yaml#L3085-L3385) | +| `metadata.metrics.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for metrics otelcol container. | `{ periodSeconds: 3, failureThreshold: 60}` | +| `metadata.metrics.statefulset.nodeSelector` | Node selector for metrics metadata enrichment (otelcol) statefulset. | `{}` | +| `metadata.metrics.statefulset.tolerations` | Tolerations for metrics metadata enrichment (otelcol) statefulset. | `[]` | +| `metadata.metrics.statefulset.affinity` | Affinity for metrics metadata enrichment (otelcol) statefulset. | `{}` | +| `metadata.metrics.statefulset.podAntiAffinity` | PodAntiAffinity for metrics metadata enrichment (otelcol) statefulset. | `soft` | +| `metadata.metrics.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for metrics metadata enrichment (otelcol) statefulset. | `[]` | +| `metadata.metrics.statefulset.replicaCount` | Replica count for metrics metadata enrichment (otelcol) statefulset. | `3` | +| `metadata.metrics.statefulset.resources` | Resources for metrics metadata enrichment (otelcol) statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | +| `metadata.metrics.statefulset.priorityClassName` | Priority class name for metrics metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.metrics.statefulset.podLabels` | Additional labels for metrics metadata enrichment (otelcol) pods. | `{}` | +| `metadata.metrics.statefulset.podAnnotations` | Additional annotations for metrics metadata enrichment (otelcol) pods. | `{}` | +| `metadata.metrics.statefulset.containers.metadata.securityContext` | The securityContext configuration for otelcol container for metrics metadata enrichment statefulset. 
| `{}` | +| `metadata.metrics.statefulset.extraEnvVars` | Additional environment variables for metrics metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.metrics.statefulset.extraVolumes` | Additional volumes for metrics metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.metrics.statefulset.extraVolumeMounts` | Additional volume mounts for metrics metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.metrics.autoscaling.enabled` | Option to turn autoscaling on for metrics metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `metadata.metrics.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `metadata.metrics.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | +| `metadata.metrics.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | +| `metadata.metrics.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | +| `metadata.metrics.podDisruptionBudget` | Pod Disruption Budget for metrics metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` | +| `metadata.logs.enabled` | Flag to control deploying the otelcol logs statefulsets. | `true` | +| `metadata.logs.logLevel` | Flag to control logging level for OpenTelemetry Collector for logs. Can be `debug`, `info`, `warn`, `error`, `dpanic`, `panic`, `fatal`. | `info` | +| `metadata.logs.config` | Configuration for logs otelcol. See also https://github.com/SumoLogic/sumologic-otel-collector/blob/main/docs/Configuration.md. | [metadata.logs.config in values.yaml](./values.yaml#L3457-L3744) | +| `metadata.logs.statefulset.containers.otelcol.startupProbe` | Startup probe configuration for logs otelcol container. 
| `{ periodSeconds: 3, failureThreshold: 60}` | +| `metadata.logs.statefulset.nodeSelector` | Node selector for logs metadata enrichment (otelcol) statefulset. | `{}` | +| `metadata.logs.statefulset.tolerations` | Tolerations for logs metadata enrichment (otelcol) statefulset. | `[]` | +| `metadata.logs.statefulset.affinity` | Affinity for logs metadata enrichment (otelcol) statefulset. | `{}` | +| `metadata.logs.statefulset.podAntiAffinity` | PodAntiAffinity for logs metadata enrichment (otelcol) statefulset. | `soft` | +| `metadata.logs.statefulset.topologySpreadConstraints` | TopologySpreadConstraints for logs metadata enrichment (otelcol) statefulset. | `[]` | +| `metadata.logs.statefulset.replicaCount` | Replica count for logs metadata enrichment (otelcol) statefulset. | `3` | +| `metadata.logs.statefulset.resources` | Resources for logs metadata enrichment (otelcol) statefulset. | `{"limits":{"cpu":1,"memory":"1Gi"},"requests":{"cpu":0.5,"memory":"768Mi"}}` | +| `metadata.logs.statefulset.priorityClassName` | Priority class name for logs metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.logs.statefulset.podLabels` | Additional labels for logs metadata enrichment (otelcol) pods. | `{}` | +| `metadata.logs.statefulset.podAnnotations` | Additional annotations for logs metadata enrichment (otelcol) pods. | `{}` | +| `metadata.logs.statefulset.containers.metadata.securityContext` | The securityContext configuration for otelcol container for logs metadata enrichment statefulset. | `{}` | +| `metadata.logs.statefulset.extraEnvVars` | Additional environment variables for logs metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.logs.statefulset.extraVolumes` | Additional volumes for logs metadata enrichment (otelcol) pods. | `Nil` | +| `metadata.logs.statefulset.extraVolumeMounts` | Additional volume mounts for logs metadata enrichment (otelcol) pods. 
| `Nil` | +| `metadata.logs.autoscaling.enabled` | Option to turn autoscaling on for logs metadata enrichment (otelcol) and specify params for HPA. Autoscaling needs metrics-server to access cpu metrics. | `false` | +| `metadata.logs.autoscaling.minReplicas` | Default min replicas for autoscaling. | `3` | +| `metadata.logs.autoscaling.maxReplicas` | Default max replicas for autoscaling | `10` | +| `metadata.logs.autoscaling.targetCPUUtilizationPercentage` | The desired target CPU utilization for autoscaling. | `50` | +| `metadata.logs.autoscaling.targetMemoryUtilizationPercentage` | The desired target memory utilization for autoscaling. | `Nil` | +| `metadata.logs.podDisruptionBudget` | Pod Disruption Budget for logs metadata enrichment (otelcol) statefulset. | `{"minAvailable": 2}` | +| `tailing-sidecar-operator.enabled` | Flag to control deploying Tailing Sidecar Operator Helm sub-chart. | `false` | +| `tailing-sidecar-operator.fullnameOverride` | Used to override the chart's full name. 
| `Nil` | diff --git a/deploy/helm/sumologic/conf/setup/dashboards.sh b/deploy/helm/sumologic/conf/setup/dashboards.sh new file mode 100755 index 0000000000..33c7b303f2 --- /dev/null +++ b/deploy/helm/sumologic/conf/setup/dashboards.sh @@ -0,0 +1,139 @@ +#!/bin/bash + +SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} +readonly SUMOLOGIC_ACCESSID +SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} +readonly SUMOLOGIC_ACCESSKEY +SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} +readonly SUMOLOGIC_BASE_URL + +INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" +K8S_FOLDER_NAME="Kubernetes" +K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + +function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. The K8s Dashboards won't be installed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" +} + +load_dashboards_folder_id + +if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: 
application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi +else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" +fi diff --git a/deploy/helm/sumologic/conf/setup/monitors.sh b/deploy/helm/sumologic/conf/setup/monitors.sh index 9b2f35a205..e109d93986 100644 --- a/deploy/helm/sumologic/conf/setup/monitors.sh +++ b/deploy/helm/sumologic/conf/setup/monitors.sh @@ -93,7 +93,7 @@ if [[ -z "${MONITORS_FOLDER_ID}" ]]; then {{- end }} || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi diff --git a/deploy/helm/sumologic/conf/setup/setup.sh b/deploy/helm/sumologic/conf/setup/setup.sh index 054f3972e1..c4dc4e435a 100755 --- a/deploy/helm/sumologic/conf/setup/setup.sh +++ b/deploy/helm/sumologic/conf/setup/setup.sh @@ -151,6 +151,15 @@ echo "You can install them manually later with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" {{- end }} +# Setup Sumo Logic dashboards if enabled +{{- if .Values.sumologic.setup.dashboards.enabled }} +bash /etc/terraform/dashboards.sh +{{- else }} +echo "Installation of the Sumo Logic dashboards is disabled." +echo "You can install them manually later with:" +echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" +{{- end }} + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/deploy/helm/sumologic/templates/NOTES.txt b/deploy/helm/sumologic/templates/NOTES.txt index 3536cdb86c..1f9e945053 100644 --- a/deploy/helm/sumologic/templates/NOTES.txt +++ b/deploy/helm/sumologic/templates/NOTES.txt @@ -28,7 +28,13 @@ fails please refer to the following to create them manually: https://github.com/SumoLogic/sumologic-kubernetes-collection/blob/2b3ca63/deploy/docs/Installation_with_Helm.md#prerequisite {{- if not (.Values.sumologic.setup.monitors.enabled) }} -echo "Installation of the Sumo Logic monitors is disabled." -echo "You can install them with:" -echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" +Installation of the Sumo Logic monitors is disabled. 
+You can install them with: +https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes +{{- end }} + +{{- if not (.Values.sumologic.setup.dashboards.enabled) }} +Installation of the Sumo Logic dashboards is disabled. +You can install them manually later with: +https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app {{- end }} \ No newline at end of file diff --git a/deploy/helm/sumologic/values.yaml b/deploy/helm/sumologic/values.yaml index 73b075ed7b..86bbb9f190 100644 --- a/deploy/helm/sumologic/values.yaml +++ b/deploy/helm/sumologic/values.yaml @@ -132,6 +132,10 @@ sumologic: ## A list of emails to send notifications from monitors notificationEmails: [] + dashboards: + ## If enabled, a pre-install hook will install k8s dashboards in Sumo Logic + enabled: true + collector: ## Configuration of additional collector fields ## https://help.sumologic.com/Manage/Fields#http-source-fields diff --git a/tests/helm/terraform/static/all_fields.output.yaml b/tests/helm/terraform/static/all_fields.output.yaml index 5fed6a80a3..636d3e4bc6 100644 --- a/tests/helm/terraform/static/all_fields.output.yaml +++ b/tests/helm/terraform/static/all_fields.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + 
"${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + 
+ local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -222,7 +362,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -510,6 +650,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/collector_fields.output.yaml b/tests/helm/terraform/static/collector_fields.output.yaml index 14ecbf4651..85ef518b01 100644 --- a/tests/helm/terraform/static/collector_fields.output.yaml +++ b/tests/helm/terraform/static/collector_fields.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content 
folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H 
"isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -221,7 +361,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -464,6 +604,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/conditional_sources.output.yaml b/tests/helm/terraform/static/conditional_sources.output.yaml index 248108aaa6..b3c585aac6 100644 --- a/tests/helm/terraform/static/conditional_sources.output.yaml +++ b/tests/helm/terraform/static/conditional_sources.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" 
content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + 
-H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -211,7 +351,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -382,6 +522,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/custom.output.yaml b/tests/helm/terraform/static/custom.output.yaml index 248108aaa6..b3c585aac6 100644 --- a/tests/helm/terraform/static/custom.output.yaml +++ b/tests/helm/terraform/static/custom.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. 
The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: 
true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -211,7 +351,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -382,6 +522,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/default.output.yaml b/tests/helm/terraform/static/default.output.yaml index 063ca8c4fd..a27c7e0dbe 100644 --- a/tests/helm/terraform/static/default.output.yaml +++ b/tests/helm/terraform/static/default.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. 
The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: 
true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -221,7 +361,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -462,6 +602,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/disable_default_metrics.output.yaml b/tests/helm/terraform/static/disable_default_metrics.output.yaml index a52c98f282..bfd0b4beb3 100644 --- a/tests/helm/terraform/static/disable_default_metrics.output.yaml +++ b/tests/helm/terraform/static/disable_default_metrics.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the 
\"Admin Recommended\" content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u 
"${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -220,7 +360,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -454,6 +594,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/disabled_dashboards.input.yaml b/tests/helm/terraform/static/disabled_dashboards.input.yaml new file mode 100644 index 0000000000..94c9f88904 --- /dev/null +++ b/tests/helm/terraform/static/disabled_dashboards.input.yaml @@ -0,0 +1,4 @@ +sumologic: + setup: + dashboards: + enabled: false \ No newline at end of file diff --git a/tests/helm/terraform/static/disabled_dashboards.output.yaml b/tests/helm/terraform/static/disabled_dashboards.output.yaml new file mode 100644 index 0000000000..a6454ba6a3 --- /dev/null +++ b/tests/helm/terraform/static/disabled_dashboards.output.yaml @@ -0,0 +1,636 @@ +--- +# Source: sumologic/templates/setup/configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: RELEASE-NAME-sumologic-setup + annotations: + helm.sh/hook: pre-install,pre-upgrade + helm.sh/hook-weight: "2" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + labels: + app: RELEASE-NAME-sumologic + chart: "sumologic-%CURRENT_CHART_VERSION%" + release: "RELEASE-NAME" + heritage: "Helm" +data: + custom.sh: | + #!/bin/bash + # + # This script copies files from /customer-scripts to /scripts/ basing on the filename + # + # Example file structure: + # + # /customer-scripts + # ├── dir1_main.tf + # ├── dir1_setup.sh + # ├── dir2_list.txt + # └── dir2_setup.sh + # + # Expected structure: + # + # /scripts + # ├── dir1 + # │ ├── main.tf + # │ └── setup.sh + # └── dir2 + # ├── list.txt + # └── setup.sh + # + # shellcheck disable=SC2010 + # extract target directory names from the file names using _ as 
separator + err_report() { + echo "Custom script error on line $1" + exit 1 + } + trap 'err_report $LINENO' ERR + + for dir in $(ls -1 /customer-scripts | grep _ | grep -oE '^.*?_' | sed 's/_//g' | sort | uniq); do + target="/scripts/${dir}" + mkdir "${target}" + # shellcheck disable=SC2010 + # Get files for given directory and take only filename part (after first _) + for file in $(ls -1 "/customer-scripts/${dir}_"* | grep -oE '_.*' | sed 's/_//g'); do + cp "/customer-scripts/${dir}_${file}" "${target}/${file}" + done + + if [[ ! -f setup.sh ]]; then + echo "You're missing setup.sh script in custom scripts directory: '${dir}'" + continue + fi + + cd "${target}" && bash setup.sh + done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. The K8s Dashboards won't be installed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: 
application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi + fields.tf: | + resource "sumologic_field" "cluster" { + count = var.create_fields ? 1 : 0 + + field_name = "cluster" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "container" { + count = var.create_fields ? 1 : 0 + + field_name = "container" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "deployment" { + count = var.create_fields ? 1 : 0 + + field_name = "deployment" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "host" { + count = var.create_fields ? 
1 : 0 + + field_name = "host" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "namespace" { + count = var.create_fields ? 1 : 0 + + field_name = "namespace" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "node" { + count = var.create_fields ? 1 : 0 + + field_name = "node" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "pod" { + count = var.create_fields ? 1 : 0 + + field_name = "pod" + data_type = "String" + state = "Enabled" + } + resource "sumologic_field" "service" { + count = var.create_fields ? 1 : 0 + + field_name = "service" + data_type = "String" + state = "Enabled" + } + locals.tf: | + locals { + default_events_source = "events" + default_logs_source = "logs" + apiserver_metrics_source = "apiserver-metrics" + control_plane_metrics_source = "control-plane-metrics" + controller_metrics_source = "kube-controller-manager-metrics" + default_metrics_source = "(default-metrics)" + kubelet_metrics_source = "kubelet-metrics" + node_metrics_source = "node-exporter-metrics" + scheduler_metrics_source = "kube-scheduler-metrics" + state_metrics_source = "kube-state-metrics" + } + main.tf: | + terraform { + required_providers { + sumologic = { + source = "sumologic/sumologic" + version = "~> 2.11" + } + kubernetes = { + source = "hashicorp/kubernetes" + version = "~> 1.13" + } + } + } + monitors.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + MONITORS_FOLDER_NAME="Kubernetes" + MONITORS_DISABLED="false" + + MONITORS_ROOT_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/monitors/root | jq -r '.id' )" + readonly MONITORS_ROOT_ID + + # verify if the integrations folder 
already exists + INTEGRATIONS_RESPONSE="$(curl -XGET -s -G \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/monitors/search \ + --data-urlencode "query=type:folder ${INTEGRATIONS_FOLDER_NAME}" | \ + jq '.[]' )" + readonly INTEGRATIONS_RESPONSE + + INTEGRATIONS_FOLDER_ID="$( echo "${INTEGRATIONS_RESPONSE}" | \ + jq -r "select(.item.name == \"${INTEGRATIONS_FOLDER_NAME}\") | select(.item.parentId == \"${MONITORS_ROOT_ID}\") | .item.id" )" + + # and create it if necessary + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"type\":\"MonitorsLibraryFolder\",\"description\":\"Monitors provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/monitors?parentId="${MONITORS_ROOT_ID}" | \ + jq -r " .id" )" + fi + + # verify if the k8s monitors folder already exists + MONITORS_RESPONSE="$(curl -XGET -s -G \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/monitors/search \ + --data-urlencode "query=type:folder ${MONITORS_FOLDER_NAME}" | \ + jq '.[]' )" + readonly MONITORS_RESPONSE + + MONITORS_FOLDER_ID="$( echo "${MONITORS_RESPONSE}" | \ + jq -r "select(.item.name == \"${MONITORS_FOLDER_NAME}\") | select(.item.parentId == \"${INTEGRATIONS_FOLDER_ID}\") | .item.id" )" + readonly MONITORS_FOLDER_ID + + if [[ -z "${MONITORS_FOLDER_ID}" ]]; then + # go to monitors directory + cd /monitors || exit 2 + + # Fall back to init -upgrade to prevent: + # Error: Inconsistent dependency lock file + terraform init -input=false || terraform init -input=false -upgrade + + # extract environment from SUMOLOGIC_BASE_URL + # see: https://help.sumologic.com/APIs/General-API-Information/Sumo-Logic-Endpoints-by-Deployment-and-Firewall-Security + SUMOLOGIC_ENV=$( echo "${SUMOLOGIC_BASE_URL}" | sed -E 
's/https:\/\/.*(au|ca|de|eu|fed|in|jp|us2)\.sumologic\.com.*/\1/' ) + if [[ "${SUMOLOGIC_BASE_URL}" == "${SUMOLOGIC_ENV}" ]] ; then + SUMOLOGIC_ENV="us1" + fi + + TF_LOG_PROVIDER=DEBUG terraform apply \ + -auto-approve \ + -var="access_id=${SUMOLOGIC_ACCESSID}" \ + -var="access_key=${SUMOLOGIC_ACCESSKEY}" \ + -var="environment=${SUMOLOGIC_ENV}" \ + -var="folder=${MONITORS_FOLDER_NAME}" \ + -var="folder_parent_id=${INTEGRATIONS_FOLDER_ID}" \ + -var="monitors_disabled=${MONITORS_DISABLED}" \ + || { echo "Error during applying Terraform monitors."; exit 1; } + else + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." + echo "You can (re)install them manually with:" + echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" + fi + providers.tf: |- + provider "sumologic" {} + + provider "kubernetes" { + + cluster_ca_certificate = file("/var/run/secrets/kubernetes.io/serviceaccount/ca.crt") + host = "https://kubernetes.default.svc" + load_config_file = "false" + token = file("/var/run/secrets/kubernetes.io/serviceaccount/token") + } + resources.tf: | + resource "sumologic_collector" "collector" { + name = var.collector_name + fields = { + cluster = var.cluster_name + } + } + + resource "sumologic_http_source" "default_events_source" { + name = local.default_events_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "default_logs_source" { + name = local.default_logs_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "apiserver_metrics_source" { + name = local.apiserver_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "control_plane_metrics_source" { + name = local.control_plane_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "controller_metrics_source" { + name = 
local.controller_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "default_metrics_source" { + name = local.default_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "kubelet_metrics_source" { + name = local.kubelet_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "node_metrics_source" { + name = local.node_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "scheduler_metrics_source" { + name = local.scheduler_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "sumologic_http_source" "state_metrics_source" { + name = local.state_metrics_source + collector_id = sumologic_collector.collector.id + } + + resource "kubernetes_secret" "sumologic_collection_secret" { + metadata { + name = "sumologic" + namespace = var.namespace_name + } + + data = { + endpoint-events = sumologic_http_source.default_events_source.url + endpoint-logs = sumologic_http_source.default_logs_source.url + endpoint-metrics-apiserver = sumologic_http_source.apiserver_metrics_source.url + endpoint-control_plane_metrics_source = sumologic_http_source.control_plane_metrics_source.url + endpoint-metrics-kube-controller-manager = sumologic_http_source.controller_metrics_source.url + endpoint-metrics = sumologic_http_source.default_metrics_source.url + endpoint-metrics-kubelet = sumologic_http_source.kubelet_metrics_source.url + endpoint-metrics-node-exporter = sumologic_http_source.node_metrics_source.url + endpoint-metrics-kube-scheduler = sumologic_http_source.scheduler_metrics_source.url + endpoint-metrics-kube-state = sumologic_http_source.state_metrics_source.url + } + + type = "Opaque" + } + setup.sh: | + #!/bin/bash + + readonly DEBUG_MODE=${DEBUG_MODE:="false"} + readonly DEBUG_MODE_ENABLED_FLAG="true" + + # Let's compare the variables ignoring 
the case with help of ${VARIABLE,,} which makes the string lowercased + # so that we don't have to deal with True vs true vs TRUE + if [[ ${DEBUG_MODE,,} == "${DEBUG_MODE_ENABLED_FLAG}" ]]; then + echo "Entering the debug mode with continuous sleep. No setup will be performed." + echo "Please exec into the setup container and run the setup.sh by hand or set the sumologic.setup.debug=false and reinstall." + + while true; do + sleep 10 + echo "$(date) Sleeping in the debug mode..." + done + fi + + function fix_sumo_base_url() { + local BASE_URL + BASE_URL=${SUMOLOGIC_BASE_URL} + + if [[ "${BASE_URL}" =~ ^\s*$ ]]; then + BASE_URL="https://api.sumologic.com/api/" + fi + + OPTIONAL_REDIRECTION="$(curl -XGET -s -o /dev/null -D - \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${BASE_URL}"v1/collectors \ + | grep -Fi location )" + + if [[ ! ${OPTIONAL_REDIRECTION} =~ ^\s*$ ]]; then + BASE_URL=$( echo "${OPTIONAL_REDIRECTION}" | sed -E 's/.*: (https:\/\/.*(au|ca|de|eu|fed|in|jp|us2)?\.sumologic\.com\/api\/).*/\1/' ) + fi + + BASE_URL=${BASE_URL%v1*} + + echo "${BASE_URL}" + } + + SUMOLOGIC_BASE_URL=$(fix_sumo_base_url) + export SUMOLOGIC_BASE_URL + # Support proxy for Terraform + export HTTP_PROXY=${HTTP_PROXY:=""} + export HTTPS_PROXY=${HTTPS_PROXY:=""} + export NO_PROXY=${NO_PROXY:=""} + + function get_remaining_fields() { + local RESPONSE + RESPONSE="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/fields/quota)" + readonly RESPONSE + + echo "${RESPONSE}" + } + + # Check if we'd have at least 10 fields remaining after additional fields + # would be created for the collection + function should_create_fields() { + local RESPONSE + RESPONSE=$(get_remaining_fields) + readonly RESPONSE + + if ! jq -e <<< "${RESPONSE}" ; then + printf "Failed requesting fields API:\n%s\n" "${RESPONSE}" + return 1 + fi + + if ! 
jq -e '.remaining' <<< "${RESPONSE}" ; then + printf "Failed requesting fields API:\n%s\n" "${RESPONSE}" + return 1 + fi + + local REMAINING + REMAINING=$(jq -e '.remaining' <<< "${RESPONSE}") + readonly REMAINING + if [[ $(( REMAINING - 8 )) -ge 10 ]] ; then + return 0 + else + return 1 + fi + } + + cp /etc/terraform/{locals,main,providers,resources,variables,fields}.tf /terraform/ + cd /terraform || exit 1 + + # Fall back to init -upgrade to prevent: + # Error: Inconsistent dependency lock file + terraform init -input=false -get=false || terraform init -input=false -upgrade + + # Sumo Logic fields + if should_create_fields ; then + readonly CREATE_FIELDS=1 + FIELDS_RESPONSE="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/fields | jq '.data[]' )" + readonly FIELDS_RESPONSE + + declare -ra FIELDS=("cluster" "container" "deployment" "host" "namespace" "node" "pod" "service") + for FIELD in "${FIELDS[@]}" ; do + FIELD_ID=$( echo "${FIELDS_RESPONSE}" | jq -r "select(.fieldName == \"${FIELD}\") | .fieldId" ) + # Don't try to import non existing fields + if [[ -z "${FIELD_ID}" ]]; then + continue + fi + + terraform import \ + -var="create_fields=1" \ + sumologic_field."${FIELD}" "${FIELD_ID}" + done + else + readonly CREATE_FIELDS=0 + echo "Couldn't automatically create fields" + echo "You do not have enough field capacity to create the required fields automatically." + echo "Please refer to https://help.sumologic.com/Manage/Fields to manually create the fields after you have removed unused fields to free up capacity." + fi + + readonly COLLECTOR_NAME="kubernetes" + + # Sumo Logic Collector and HTTP sources + # Only import sources when collector exists. 
+ if terraform import sumologic_collector.collector "${COLLECTOR_NAME}"; then + true # prevent to render empty if; then + terraform import sumologic_http_source.default_events_source "${COLLECTOR_NAME}/events" + terraform import sumologic_http_source.default_logs_source "${COLLECTOR_NAME}/logs" + terraform import sumologic_http_source.apiserver_metrics_source "${COLLECTOR_NAME}/apiserver-metrics" + terraform import sumologic_http_source.control_plane_metrics_source "${COLLECTOR_NAME}/control-plane-metrics" + terraform import sumologic_http_source.controller_metrics_source "${COLLECTOR_NAME}/kube-controller-manager-metrics" + terraform import sumologic_http_source.default_metrics_source "${COLLECTOR_NAME}/(default-metrics)" + terraform import sumologic_http_source.kubelet_metrics_source "${COLLECTOR_NAME}/kubelet-metrics" + terraform import sumologic_http_source.node_metrics_source "${COLLECTOR_NAME}/node-exporter-metrics" + terraform import sumologic_http_source.scheduler_metrics_source "${COLLECTOR_NAME}/kube-scheduler-metrics" + terraform import sumologic_http_source.state_metrics_source "${COLLECTOR_NAME}/kube-state-metrics" + fi + + # Kubernetes Secret + terraform import kubernetes_secret.sumologic_collection_secret sumologic/sumologic + + # Apply planned changes + TF_LOG_PROVIDER=DEBUG terraform apply \ + -auto-approve \ + -var="create_fields=${CREATE_FIELDS}" \ + || { echo "Error during applying Terraform changes"; exit 1; } + + # Setup Sumo Logic monitors if enabled + bash /etc/terraform/monitors.sh + + # Setup Sumo Logic dashboards if enabled + echo "Installation of the Sumo Logic dashboards is disabled." 
+ echo "You can install them manually later with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + + # Cleanup env variables + export SUMOLOGIC_BASE_URL= + export SUMOLOGIC_ACCESSKEY= + export SUMOLOGIC_ACCESSID= + + bash /etc/terraform/custom.sh + variables.tf: | + variable "cluster_name" { + type = string + default = "kubernetes" + } + + variable "collector_name" { + type = string + default = "kubernetes" + } + + variable "namespace_name" { + type = string + default = "sumologic" + } + + variable "create_fields" { + description = "If set, Terraform will attempt to create fields at Sumo Logic" + type = bool + default = true + } diff --git a/tests/helm/terraform/static/disabled_monitors.output.yaml b/tests/helm/terraform/static/disabled_monitors.output.yaml index 5ec8d979c7..f92dded490 100644 --- a/tests/helm/terraform/static/disabled_monitors.output.yaml +++ b/tests/helm/terraform/static/disabled_monitors.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + 
ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq 
'.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -221,7 +361,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -464,6 +604,9 @@ data: echo "You can install them manually later with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/monitors_with_email_notifications.output.yaml b/tests/helm/terraform/static/monitors_with_email_notifications.output.yaml index 542319b890..fcca57c65a 100644 --- a/tests/helm/terraform/static/monitors_with_email_notifications.output.yaml +++ b/tests/helm/terraform/static/monitors_with_email_notifications.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr 
-d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + 
load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -227,7 +367,7 @@ data: -var="email_notifications_missingdata=[{${NOTIFICATIONS_SETTINGS},${NOTIFICATIONS_CONTENT},run_for_trigger_types=[\"MissingData\", \"ResolvedMissingData\"]}]" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -468,6 +608,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/monitors_with_single_email.output.yaml b/tests/helm/terraform/static/monitors_with_single_email.output.yaml index 0cb19dcc61..039e619529 100644 --- a/tests/helm/terraform/static/monitors_with_single_email.output.yaml +++ b/tests/helm/terraform/static/monitors_with_single_email.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data 
from the \"Admin Recommended\" content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u 
"${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -227,7 +367,7 @@ data: -var="email_notifications_missingdata=[{${NOTIFICATIONS_SETTINGS},${NOTIFICATIONS_CONTENT},run_for_trigger_types=[\"MissingData\", \"ResolvedMissingData\"]}]" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -468,6 +608,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/strip_extrapolation.output.yaml b/tests/helm/terraform/static/strip_extrapolation.output.yaml index 4cbc07a5ea..d688862e3f 100644 --- a/tests/helm/terraform/static/strip_extrapolation.output.yaml +++ b/tests/helm/terraform/static/strip_extrapolation.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" 
content folder. The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + 
-H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -221,7 +361,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -463,6 +603,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY= diff --git a/tests/helm/terraform/static/traces.output.yaml b/tests/helm/terraform/static/traces.output.yaml index 6a4eedaf3a..6dd6bb74da 100644 --- a/tests/helm/terraform/static/traces.output.yaml +++ b/tests/helm/terraform/static/traces.output.yaml @@ -61,6 +61,146 @@ data: cd "${target}" && bash setup.sh done + dashboards.sh: | + #!/bin/bash + + SUMOLOGIC_ACCESSID=${SUMOLOGIC_ACCESSID:=""} + readonly SUMOLOGIC_ACCESSID + SUMOLOGIC_ACCESSKEY=${SUMOLOGIC_ACCESSKEY:=""} + readonly SUMOLOGIC_ACCESSKEY + SUMOLOGIC_BASE_URL=${SUMOLOGIC_BASE_URL:=""} + readonly SUMOLOGIC_BASE_URL + + INTEGRATIONS_FOLDER_NAME="Sumo Logic Integrations" + K8S_FOLDER_NAME="Kubernetes" + K8S_APP_UUID="162ceac7-166a-4475-8427-65e170ae9837" + + function load_dashboards_folder_id() { + local ADMIN_FOLDER_JOB_ID + ADMIN_FOLDER_JOB_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended | jq '.id' | tr -d '"' )" + readonly ADMIN_FOLDER_JOB_ID + + local ADMIN_FOLDER_JOB_STATUS + ADMIN_FOLDER_JOB_STATUS="InProgress" + while [ "${ADMIN_FOLDER_JOB_STATUS}" = "InProgress" ]; do + ADMIN_FOLDER_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${ADMIN_FOLDER_JOB_STATUS}" != "Success" ]; then + echo "Could not fetch data from the \"Admin Recommended\" content folder. 
The K8s Dashboards won't be installed." + echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 1 + fi + + local ADMIN_FOLDER + ADMIN_FOLDER="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/adminRecommended/"${ADMIN_FOLDER_JOB_ID}"/result )" + readonly ADMIN_FOLDER + + local ADMIN_FOLDER_CHILDREN + ADMIN_FOLDER_CHILDREN="$( echo "${ADMIN_FOLDER}" | jq '.children[]')" + readonly ADMIN_FOLDER_CHILDREN + + local ADMIN_FOLDER_ID + ADMIN_FOLDER_ID="$( echo "${ADMIN_FOLDER}" | jq '.id' | tr -d '"')" + readonly ADMIN_FOLDER_ID + + INTEGRATIONS_FOLDER_ID="$( echo "${ADMIN_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${INTEGRATIONS_FOLDER_NAME}\") | .id" )" + + if [[ -z "${INTEGRATIONS_FOLDER_ID}" ]]; then + INTEGRATIONS_FOLDER_ID="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${INTEGRATIONS_FOLDER_NAME}\",\"parentId\":\"${ADMIN_FOLDER_ID}\",\"description\":\"Content provided by the Sumo Logic integrations.\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders | \ + jq -r " .id" )" + fi + + local INTEGRATIONS_FOLDER_CHILDREN + INTEGRATIONS_FOLDER_CHILDREN="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + "${SUMOLOGIC_BASE_URL}"v2/content/folders/"${INTEGRATIONS_FOLDER_ID}" | \ + jq '.children[]')" + readonly INTEGRATIONS_FOLDER_CHILDREN + + K8S_FOLDER_ID="$( echo "${INTEGRATIONS_FOLDER_CHILDREN}" | \ + jq -r "select(.name == \"${K8S_FOLDER_NAME}\") | .id" )" + } + + load_dashboards_folder_id + + if [[ -z "${K8S_FOLDER_ID}" ]]; then + APP_INSTALL_JOB_RESPONSE="$(curl -XPOST -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: 
true" \ + -H "Content-Type: application/json" \ + -d "{\"name\":\"${K8S_FOLDER_NAME}\",\"destinationFolderId\":\"${INTEGRATIONS_FOLDER_ID}\",\"description\":\"Kubernetes dashboards provided by Sumo Logic.\"}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/"${K8S_APP_UUID}"/install )" + readonly APP_INSTALL_JOB_RESPONSE + + APP_INSTALL_JOB_ID="$(echo "${APP_INSTALL_JOB_RESPONSE}" | jq '.id' | tr -d '"' )" + readonly APP_INSTALL_JOB_ID + + APP_INSTALL_JOB_STATUS="InProgress" + while [ "${APP_INSTALL_JOB_STATUS}" = "InProgress" ]; do + APP_INSTALL_JOB_STATUS="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status | jq '.status' | tr -d '"' )" + + sleep 1 + done + + if [ "${APP_INSTALL_JOB_STATUS}" != "Success" ]; then + ERROR_MSG="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/apps/install/"${APP_INSTALL_JOB_ID}"/status )" + echo "${ERROR_MSG}" + + echo "Installation of the K8s Dashboards failed." 
+ echo "You can still install them manually:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + exit 2 + else + load_dashboards_folder_id + + ORG_ID="$(curl -XGET -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + "${SUMOLOGIC_BASE_URL}"v1/account/contract | jq '.orgId' | tr -d '"' )" + readonly ORG_ID + + PERMS_ERRORS=$( curl -XPUT -s \ + -u "${SUMOLOGIC_ACCESSID}:${SUMOLOGIC_ACCESSKEY}" \ + -H "isAdminMode: true" \ + -H "Content-Type: application/json" \ + -d "{\"contentPermissionAssignments\": [{\"permissionName\": \"View\",\"sourceType\": \"org\",\"sourceId\": \"${ORG_ID}\",\"contentId\": \"${K8S_FOLDER_ID}\"}],\"notifyRecipients\":false,\"notificationMessage\":\"\"}" \ + "${SUMOLOGIC_BASE_URL}"v2/content/"${K8S_FOLDER_ID}"/permissions/add | jq '.errors' ) + readonly PERMS_ERRORS + + if [ "${PERMS_ERRORS}" != "null" ]; then + echo "Setting permissions for the installed content failed." + echo "${PERMS_ERRORS}" + fi + + echo "Installation of the K8s Dashboards succeeded." + fi + else + echo "The K8s Dashboards have been already installed." + echo "You can (re)install them manually with:" + echo "https://help.sumologic.com/07Sumo-Logic-Apps/10Containers_and_Orchestration/Kubernetes/Install_the_Kubernetes_App%2C_Alerts%2C_and_view_the_Dashboards#install-the-app" + fi fields.tf: | resource "sumologic_field" "cluster" { count = var.create_fields ? 1 : 0 @@ -213,7 +353,7 @@ data: -var="monitors_disabled=${MONITORS_DISABLED}" \ || { echo "Error during applying Terraform monitors."; exit 1; } else - echo "The monitors were already installed in ${MONITORS_FOLDER_NAME}." + echo "The monitors have been already installed in ${MONITORS_FOLDER_NAME}." 
echo "You can (re)install them manually with:" echo "https://github.com/SumoLogic/terraform-sumologic-sumo-logic-monitor/tree/main/monitor_packages/kubernetes" fi @@ -398,6 +538,9 @@ data: # Setup Sumo Logic monitors if enabled bash /etc/terraform/monitors.sh + # Setup Sumo Logic dashboards if enabled + bash /etc/terraform/dashboards.sh + # Cleanup env variables export SUMOLOGIC_BASE_URL= export SUMOLOGIC_ACCESSKEY=