diff --git a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml index cf290b0ec6..4871e399fd 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_pgadmins.yaml @@ -2048,6 +2048,75 @@ spec: - message: must be at least one hour rule: duration("1h") <= self && self <= duration("8760h") type: object + metrics: + description: Metrics is the place for users to configure metrics + collection. + properties: + customQueries: + description: |- + Where users can turn off built-in metrics and also provide their own + custom queries. + properties: + add: + description: User defined queries and metrics. + items: + properties: + collectionInterval: + default: 5s + description: How often the queries should be run. + format: duration + maxLength: 20 + minLength: 1 + pattern: ^((PT)?( *[0-9]+ *(?i:(ms|s|m)|(milli|sec|min)s?))+|0)$ + type: string + x-kubernetes-validations: + - rule: duration("0") <= self && self <= duration("60m") + name: + description: |- + The name of this batch of queries, which will be used in naming the OTel + SqlQuery receiver. + maxLength: 20 + pattern: ^[^\pZ\pC\pS]+$ + type: string + queries: + description: A ConfigMap holding the yaml file that + contains the queries. + properties: + key: + description: Name of the data field within the + ConfigMap. + maxLength: 253 + minLength: 1 + pattern: ^[-._a-zA-Z0-9]+$ + type: string + x-kubernetes-validations: + - message: cannot be "." or start with ".." + rule: self != "." && !self.startsWith("..") + name: + description: Name of the ConfigMap. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?([.][a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - key + - name + type: object + x-kubernetes-map-type: atomic + required: + - name + - queries + type: object + type: array + remove: + description: |- + A list of built-in queries that should be removed. If all queries for a + given SQL statement are removed, the SQL statement will no longer be run. + items: + type: string + type: array + type: object + type: object resources: description: Resources holds the resource requirements for the collector container. diff --git a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml index 26e1d31154..3136b18332 100644 --- a/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml +++ b/config/crd/bases/postgres-operator.crunchydata.com_postgresclusters.yaml @@ -11695,6 +11695,75 @@ spec: - message: must be at least one hour rule: duration("1h") <= self && self <= duration("8760h") type: object + metrics: + description: Metrics is the place for users to configure metrics + collection. + properties: + customQueries: + description: |- + Where users can turn off built-in metrics and also provide their own + custom queries. + properties: + add: + description: User defined queries and metrics. + items: + properties: + collectionInterval: + default: 5s + description: How often the queries should be run. + format: duration + maxLength: 20 + minLength: 1 + pattern: ^((PT)?( *[0-9]+ *(?i:(ms|s|m)|(milli|sec|min)s?))+|0)$ + type: string + x-kubernetes-validations: + - rule: duration("0") <= self && self <= duration("60m") + name: + description: |- + The name of this batch of queries, which will be used in naming the OTel + SqlQuery receiver. 
+ maxLength: 20 + pattern: ^[^\pZ\pC\pS]+$ + type: string + queries: + description: A ConfigMap holding the yaml file that + contains the queries. + properties: + key: + description: Name of the data field within the + ConfigMap. + maxLength: 253 + minLength: 1 + pattern: ^[-._a-zA-Z0-9]+$ + type: string + x-kubernetes-validations: + - message: cannot be "." or start with ".." + rule: self != "." && !self.startsWith("..") + name: + description: Name of the ConfigMap. + maxLength: 253 + minLength: 1 + pattern: ^[a-z0-9]([-a-z0-9]*[a-z0-9])?([.][a-z0-9]([-a-z0-9]*[a-z0-9])?)*$ + type: string + required: + - key + - name + type: object + x-kubernetes-map-type: atomic + required: + - name + - queries + type: object + type: array + remove: + description: |- + A list of built-in queries that should be removed. If all queries for a + given SQL statement are removed, the SQL statement will no longer be run. + items: + type: string + type: array + type: object + type: object resources: description: Resources holds the resource requirements for the collector container. diff --git a/internal/collector/instance.go b/internal/collector/instance.go index 970f9c9109..9c83f11f3a 100644 --- a/internal/collector/instance.go +++ b/internal/collector/instance.go @@ -43,11 +43,12 @@ func AddToPod( spec *v1beta1.InstrumentationSpec, pullPolicy corev1.PullPolicy, inInstanceConfigMap *corev1.ConfigMap, - outPod *corev1.PodSpec, + template *corev1.PodTemplateSpec, volumeMounts []corev1.VolumeMount, sqlQueryPassword string, logDirectories []string, includeLogrotate bool, + thisPodServesMetrics bool, ) { if spec == nil || !(feature.Enabled(ctx, feature.OpenTelemetryLogs) || @@ -76,14 +77,13 @@ func AddToPod( }}, } - // If the user has specified files to be mounted in the spec, add them to the projected config volume - if spec != nil && spec.Config != nil && spec.Config.Files != nil { - configVolume.Projected.Sources = append(configVolume.Projected.Sources, spec.Config.Files...) + // If the user has specified files to be mounted in the spec, add them to + // the projected config volume + if spec.Config != nil && spec.Config.Files != nil { + configVolume.Projected.Sources = append(configVolume.Projected.Sources, + spec.Config.Files...) 
} - // Add configVolume to the pod's volumes - outPod.Volumes = append(outPod.Volumes, configVolume) - // Create collector container container := corev1.Container{ Name: naming.ContainerCollector, @@ -113,6 +113,28 @@ func AddToPod( VolumeMounts: append(volumeMounts, configVolumeMount), } + // If metrics feature is enabled and this Pod serves metrics, add the + // Prometheus port to this container + if feature.Enabled(ctx, feature.OpenTelemetryMetrics) && thisPodServesMetrics { + container.Ports = []corev1.ContainerPort{{ + ContainerPort: int32(PrometheusPort), + Name: "otel-metrics", + Protocol: corev1.ProtocolTCP, + }} + + // If the user has specified custom queries to add, put the queries + // file(s) in the projected config volume + if spec.Metrics != nil && spec.Metrics.CustomQueries != nil && + spec.Metrics.CustomQueries.Add != nil { + for _, querySet := range spec.Metrics.CustomQueries.Add { + projection := querySet.Queries.AsProjection(querySet.Name + + "/" + querySet.Queries.Key) + configVolume.Projected.Sources = append(configVolume.Projected.Sources, + corev1.VolumeProjection{ConfigMap: &projection}) + } + } + } + // If this is a pod that uses logrotate for log rotation, add config volume // and mount for logrotate config if includeLogrotate { @@ -136,18 +158,17 @@ func AddToPod( }}, } container.VolumeMounts = append(container.VolumeMounts, logrotateConfigVolumeMount) - outPod.Volumes = append(outPod.Volumes, logrotateConfigVolume) + template.Spec.Volumes = append(template.Spec.Volumes, logrotateConfigVolume) } - if feature.Enabled(ctx, feature.OpenTelemetryMetrics) { - container.Ports = []corev1.ContainerPort{{ - ContainerPort: int32(8889), - Name: "otel-metrics", - Protocol: corev1.ProtocolTCP, - }} - } + // Add configVolume to the Pod's volumes and add the collector container to + // the Pod's containers + template.Spec.Volumes = append(template.Spec.Volumes, configVolume) + template.Spec.Containers = append(template.Spec.Containers, container) - outPod.Containers = append(outPod.Containers, container) + // add the OTel collector label to the Pod + initialize.Labels(template) + template.Labels[naming.LabelCollectorDiscovery] = "true" } // startCommand generates the command script used by the collector container @@ -192,7 +213,8 @@ while read -r -t 5 -u "${fd}" ||:; do done `, mkdirScript, configDirectory, logrotateCommand) - wrapper := `monitor() {` + startScript + `}; export directory="$1"; export -f monitor; exec -a "$0" bash -ceu monitor` + wrapper := `monitor() {` + startScript + + `}; export directory="$1"; export -f monitor; exec -a "$0" bash -ceu monitor` return []string{"bash", "-ceu", "--", wrapper, "collector", configDirectory} } diff --git a/internal/collector/naming.go b/internal/collector/naming.go index 964d3d4d13..c8db6d6f21 100644 --- a/internal/collector/naming.go +++ b/internal/collector/naming.go @@ -10,6 +10,7 @@ const LogsBatchProcessor = "batch/logs" const OneSecondBatchProcessor = "batch/1s" const SubSecondBatchProcessor = "batch/200ms" const Prometheus = "prometheus" +const PrometheusPort = 9187 const PGBouncerMetrics = "metrics/pgbouncer" const PostgresMetrics = "metrics/postgres" const PatroniMetrics = "metrics/patroni" diff --git a/internal/collector/patroni.go b/internal/collector/patroni.go index 60305b458b..532d103db7 100644 --- a/internal/collector/patroni.go +++ b/internal/collector/patroni.go @@ -7,6 +7,7 @@ package collector import ( "context" "slices" + "strconv" "github.com/crunchydata/postgres-operator/internal/feature" 
"github.com/crunchydata/postgres-operator/internal/naming" @@ -136,7 +137,7 @@ func EnablePatroniMetrics(ctx context.Context, if feature.Enabled(ctx, feature.OpenTelemetryMetrics) { // Add Prometheus exporter outConfig.Exporters[Prometheus] = map[string]any{ - "endpoint": "0.0.0.0:9187", + "endpoint": "0.0.0.0:" + strconv.Itoa(PrometheusPort), } // Add Prometheus Receiver diff --git a/internal/collector/pgbouncer.go b/internal/collector/pgbouncer.go index f1f150f6f4..9133bd6813 100644 --- a/internal/collector/pgbouncer.go +++ b/internal/collector/pgbouncer.go @@ -10,6 +10,7 @@ import ( "encoding/json" "fmt" "slices" + "strconv" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/naming" @@ -174,13 +175,14 @@ func EnablePgBouncerMetrics(ctx context.Context, config *Config, sqlQueryUsernam if feature.Enabled(ctx, feature.OpenTelemetryMetrics) { // Add Prometheus exporter config.Exporters[Prometheus] = map[string]any{ - "endpoint": "0.0.0.0:9187", + "endpoint": "0.0.0.0:" + strconv.Itoa(PrometheusPort), } // Add SqlQuery Receiver config.Receivers[SqlQuery] = map[string]any{ "driver": "postgres", - "datasource": fmt.Sprintf(`host=localhost dbname=pgbouncer port=5432 user=%s password=${env:PGPASSWORD}`, + "datasource": fmt.Sprintf( + `host=localhost dbname=pgbouncer port=5432 user=%s password=${env:PGPASSWORD}`, sqlQueryUsername), "queries": slices.Clone(pgBouncerMetricsQueries), } diff --git a/internal/collector/postgres_metrics.go b/internal/collector/postgres_metrics.go index 5d56afbf00..b6bd39cd87 100644 --- a/internal/collector/postgres_metrics.go +++ b/internal/collector/postgres_metrics.go @@ -10,8 +10,10 @@ import ( "encoding/json" "fmt" "slices" + "strconv" "github.com/crunchydata/postgres-operator/internal/feature" + "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/pgmonitor" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -36,32 +38,100 @@ var gtePG16 json.RawMessage //go:embed "generated/lt_pg16_metrics.json" var ltPG16 json.RawMessage +type queryMetrics struct { + Metrics []*metric `json:"metrics"` + Query string `json:"sql"` +} + +type metric struct { + Aggregation string `json:"aggregation,omitempty"` + AttributeColumns []string `json:"attribute_columns,omitempty"` + DataType string `json:"data_type,omitempty"` + Description string `json:"description,omitempty"` + MetricName string `json:"metric_name"` + Monotonic bool `json:"monotonic,omitempty"` + StartTsColumn string `json:"start_ts_column,omitempty"` + StaticAttributes map[string]string `json:"static_attributes,omitempty"` + TsColumn string `json:"ts_column,omitempty"` + Unit string `json:"unit,omitempty"` + ValueColumn string `json:"value_column"` + ValueType string `json:"value_type,omitempty"` +} + func EnablePostgresMetrics(ctx context.Context, inCluster *v1beta1.PostgresCluster, config *Config) { if feature.Enabled(ctx, feature.OpenTelemetryMetrics) { + log := logging.FromContext(ctx) + var err error + // We must create a copy of the fiveSecondMetrics variable, otherwise we // will continually append to it and blow up our ConfigMap fiveSecondMetricsClone := slices.Clone(fiveSecondMetrics) + fiveMinuteMetricsClone := slices.Clone(fiveMinuteMetrics) if inCluster.Spec.PostgresVersion >= 17 { - fiveSecondMetricsClone, _ = appendToJSONArray(fiveSecondMetricsClone, gtePG17) + fiveSecondMetricsClone, err = appendToJSONArray(fiveSecondMetricsClone, gtePG17) } 
else { - fiveSecondMetricsClone, _ = appendToJSONArray(fiveSecondMetricsClone, ltPG17) + fiveSecondMetricsClone, err = appendToJSONArray(fiveSecondMetricsClone, ltPG17) + } + if err != nil { + log.Error(err, "error compiling postgres metrics") } if inCluster.Spec.PostgresVersion >= 16 { - fiveSecondMetricsClone, _ = appendToJSONArray(fiveSecondMetricsClone, gtePG16) + fiveSecondMetricsClone, err = appendToJSONArray(fiveSecondMetricsClone, gtePG16) } else { - fiveSecondMetricsClone, _ = appendToJSONArray(fiveSecondMetricsClone, ltPG16) + fiveSecondMetricsClone, err = appendToJSONArray(fiveSecondMetricsClone, ltPG16) + } + if err != nil { + log.Error(err, "error compiling postgres metrics") + } + + // Remove any queries that user has specified in the spec + if inCluster.Spec.Instrumentation != nil && + inCluster.Spec.Instrumentation.Metrics != nil && + inCluster.Spec.Instrumentation.Metrics.CustomQueries != nil && + inCluster.Spec.Instrumentation.Metrics.CustomQueries.Remove != nil { + + // Convert json to array of queryMetrics objects + var fiveSecondMetricsArr []queryMetrics + err := json.Unmarshal(fiveSecondMetricsClone, &fiveSecondMetricsArr) + if err != nil { + log.Error(err, "error compiling postgres metrics") + } + + // Remove any specified metrics from the five second metrics + fiveSecondMetricsArr = removeMetricsFromQueries( + inCluster.Spec.Instrumentation.Metrics.CustomQueries.Remove, fiveSecondMetricsArr) + + // Convert json to array of queryMetrics objects + var fiveMinuteMetricsArr []queryMetrics + err = json.Unmarshal(fiveMinuteMetricsClone, &fiveMinuteMetricsArr) + if err != nil { + log.Error(err, "error compiling postgres metrics") + } + + // Remove any specified metrics from the five minute metrics + fiveMinuteMetricsArr = removeMetricsFromQueries( + inCluster.Spec.Instrumentation.Metrics.CustomQueries.Remove, fiveMinuteMetricsArr) + + // Convert back to json data + // The error return value can be ignored as the errchkjson linter + // deems the []queryMetrics to be a safe argument: + // https://github.com/breml/errchkjson + fiveSecondMetricsClone, _ = json.Marshal(fiveSecondMetricsArr) + fiveMinuteMetricsClone, _ = json.Marshal(fiveMinuteMetricsArr) } // Add Prometheus exporter config.Exporters[Prometheus] = map[string]any{ - "endpoint": "0.0.0.0:9187", + "endpoint": "0.0.0.0:" + strconv.Itoa(PrometheusPort), } config.Receivers[FiveSecondSqlQuery] = map[string]any{ - "driver": "postgres", - "datasource": fmt.Sprintf(`host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, pgmonitor.MonitoringUser), + "driver": "postgres", + "datasource": fmt.Sprintf( + `host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, + pgmonitor.MonitoringUser), "collection_interval": "5s", // Give Postgres time to finish setup. "initial_delay": "10s", @@ -69,13 +139,16 @@ func EnablePostgresMetrics(ctx context.Context, inCluster *v1beta1.PostgresClust } config.Receivers[FiveMinuteSqlQuery] = map[string]any{ - "driver": "postgres", - "datasource": fmt.Sprintf(`host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, pgmonitor.MonitoringUser), + "driver": "postgres", + "datasource": fmt.Sprintf( + `host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, + pgmonitor.MonitoringUser), "collection_interval": "300s", // Give Postgres time to finish setup. 
"initial_delay": "10s", - "queries": slices.Clone(fiveMinuteMetrics), + "queries": slices.Clone(fiveMinuteMetricsClone), } + // Add Metrics Pipeline config.Pipelines[PostgresMetrics] = Pipeline{ Receivers: []ComponentID{FiveSecondSqlQuery, FiveMinuteSqlQuery}, @@ -85,6 +158,34 @@ func EnablePostgresMetrics(ctx context.Context, inCluster *v1beta1.PostgresClust }, Exporters: []ComponentID{Prometheus}, } + + // Add custom queries if they are defined in the spec + if inCluster.Spec.Instrumentation != nil && + inCluster.Spec.Instrumentation.Metrics != nil && + inCluster.Spec.Instrumentation.Metrics.CustomQueries != nil && + inCluster.Spec.Instrumentation.Metrics.CustomQueries.Add != nil { + + for _, querySet := range inCluster.Spec.Instrumentation.Metrics.CustomQueries.Add { + // Create a receiver for the query set + receiverName := "sqlquery/" + querySet.Name + config.Receivers[receiverName] = map[string]any{ + "driver": "postgres", + "datasource": fmt.Sprintf( + `host=localhost dbname=postgres port=5432 user=%s password=${env:PGPASSWORD}`, + pgmonitor.MonitoringUser), + "collection_interval": querySet.CollectionInterval, + // Give Postgres time to finish setup. + "initial_delay": "10s", + "queries": "${file:/etc/otel-collector/" + + querySet.Name + "/" + querySet.Queries.Key + "}", + } + + // Add the receiver to the pipeline + pipeline := config.Pipelines[PostgresMetrics] + pipeline.Receivers = append(pipeline.Receivers, receiverName) + config.Pipelines[PostgresMetrics] = pipeline + } + } } } @@ -110,3 +211,42 @@ func appendToJSONArray(a1, a2 json.RawMessage) (json.RawMessage, error) { return merged, nil } + +func removeMetricsFromQueries(metricsToRemove []string, + queryMetricsArr []queryMetrics, +) []queryMetrics { + // Iterate over the metrics that should be removed +Outer: + for _, metricToRemove := range metricsToRemove { + // Iterate over array of query/metrics objects + for j, queryAndMetrics := range queryMetricsArr { + // Iterate over the metrics array + metricsArr := queryAndMetrics.Metrics + for k, metric := range metricsArr { + // Check to see if the metric_name matches the metricToRemove + if metric.MetricName == metricToRemove { + // Remove the metric. Since there won't ever be any + // duplicates, we will be exiting this loop early and + // therefore don't care about the order of the metrics + // array. + metricsArr[len(metricsArr)-1], metricsArr[k] = nil, metricsArr[len(metricsArr)-1] + metricsArr = metricsArr[:len(metricsArr)-1] + queryMetricsArr[j].Metrics = metricsArr + + // If the metrics array is empty, remove the query/metrics + // map entirely. Again, we don't care about order. + if len(metricsArr) == 0 { + queryMetricsArr[j] = queryMetricsArr[len(queryMetricsArr)-1] + queryMetricsArr = queryMetricsArr[:len(queryMetricsArr)-1] + } + + // We found and deleted the metric, so we can continue + // to the next iteration of the Outer loop. + continue Outer + } + } + } + } + + return queryMetricsArr +} diff --git a/internal/collector/postgres_metrics_test.go b/internal/collector/postgres_metrics_test.go new file mode 100644 index 0000000000..8a22f42b52 --- /dev/null +++ b/internal/collector/postgres_metrics_test.go @@ -0,0 +1,121 @@ +// Copyright 2024 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package collector + +import ( + "encoding/json" + "testing" + + "gotest.tools/v3/assert" +) + +func TestRemoveMetricsFromQueries(t *testing.T) { + // Convert json to map + var fiveMinuteMetricsArr []queryMetrics + err := json.Unmarshal(fiveMinuteMetrics, &fiveMinuteMetricsArr) + assert.NilError(t, err) + + assert.Equal(t, len(fiveMinuteMetricsArr), 3) + newArr := removeMetricsFromQueries([]string{"ccp_database_size_bytes"}, fiveMinuteMetricsArr) + assert.Equal(t, len(newArr), 2) + + t.Run("DeleteOneMetric", func(t *testing.T) { + sqlMetricsData := `[ + { + "metrics": [ + { + "description": "Count of sequences that have reached greater than or equal to 75% of their max available numbers.\nFunction monitor.sequence_status() can provide more details if run directly on system.\n", + "metric_name": "ccp_sequence_exhaustion_count", + "static_attributes": { "server": "localhost:5432" }, + "value_column": "count" + } + ], + "sql": "SELECT count(*) AS count FROM (\n SELECT CEIL((s.max_value-min_value::NUMERIC+1)/s.increment_by::NUMERIC) AS slots\n , CEIL((COALESCE(s.last_value,s.min_value)-s.min_value::NUMERIC+1)/s.increment_by::NUMERIC) AS used\n FROM pg_catalog.pg_sequences s\n) x WHERE (ROUND(used/slots*100)::int) \u003e 75;\n" + }, + { + "metrics": [ + { + "attribute_columns": ["dbname"], + "description": "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary", + "metric_name": "ccp_stat_database_blks_hit", + "static_attributes": { "server": "localhost:5432" }, + "value_column": "blks_hit" + }, + { + "attribute_columns": ["dbname"], + "description": "Number of disk blocks read in this database", + "metric_name": "ccp_stat_database_blks_read", + "static_attributes": { "server": "localhost:5432" }, + "value_column": "blks_read" + } + ], + "sql": "SELECT s.datname AS dbname , s.xact_commit , s.xact_rollback , s.blks_read , s.blks_hit , s.tup_returned , s.tup_fetched , s.tup_inserted , s.tup_updated , s.tup_deleted , s.conflicts , s.temp_files , s.temp_bytes , s.deadlocks FROM pg_catalog.pg_stat_database s JOIN pg_catalog.pg_database d ON d.datname = s.datname WHERE d.datistemplate = false;\n" + } +]` + var sqlMetricsArr []queryMetrics + err := json.Unmarshal([]byte(sqlMetricsData), &sqlMetricsArr) + assert.NilError(t, err) + + assert.Equal(t, len(sqlMetricsArr), 2) + metricsArr := sqlMetricsArr[1].Metrics + assert.Equal(t, len(metricsArr), 2) + + refinedSqlMetricsArr := removeMetricsFromQueries([]string{"ccp_stat_database_blks_hit"}, sqlMetricsArr) + assert.Equal(t, len(refinedSqlMetricsArr), 2) + metricsArr = refinedSqlMetricsArr[1].Metrics + assert.Equal(t, len(metricsArr), 1) + remainingMetric := metricsArr[0] + assert.Equal(t, remainingMetric.MetricName, "ccp_stat_database_blks_read") + }) + + t.Run("DeleteQueryMetricSet", func(t *testing.T) { + sqlMetricsData := `[ + { + "metrics": [ + { + "description": "Count of sequences that have reached greater than or equal to 75% of their max available numbers.\nFunction monitor.sequence_status() can provide more details if run directly on system.\n", + "metric_name": "ccp_sequence_exhaustion_count", + "static_attributes": { "server": "localhost:5432" }, + "value_column": "count" + } + ], + "sql": "SELECT count(*) AS count FROM (\n SELECT CEIL((s.max_value-min_value::NUMERIC+1)/s.increment_by::NUMERIC) AS slots\n , CEIL((COALESCE(s.last_value,s.min_value)-s.min_value::NUMERIC+1)/s.increment_by::NUMERIC) AS used\n FROM pg_catalog.pg_sequences s\n) x WHERE 
(ROUND(used/slots*100)::int) \u003e 75;\n" + }, + { + "metrics": [ + { + "attribute_columns": ["dbname"], + "description": "Number of times disk blocks were found already in the buffer cache, so that a read was not necessary", + "metric_name": "ccp_stat_database_blks_hit", + "static_attributes": { "server": "localhost:5432" }, + "value_column": "blks_hit" + }, + { + "attribute_columns": ["dbname"], + "description": "Number of disk blocks read in this database", + "metric_name": "ccp_stat_database_blks_read", + "static_attributes": { "server": "localhost:5432" }, + "value_column": "blks_read" + } + ], + "sql": "SELECT s.datname AS dbname , s.xact_commit , s.xact_rollback , s.blks_read , s.blks_hit , s.tup_returned , s.tup_fetched , s.tup_inserted , s.tup_updated , s.tup_deleted , s.conflicts , s.temp_files , s.temp_bytes , s.deadlocks FROM pg_catalog.pg_stat_database s JOIN pg_catalog.pg_database d ON d.datname = s.datname WHERE d.datistemplate = false;\n" + } +]` + var sqlMetricsArr []queryMetrics + err := json.Unmarshal([]byte(sqlMetricsData), &sqlMetricsArr) + assert.NilError(t, err) + + assert.Equal(t, len(sqlMetricsArr), 2) + metricsArr := sqlMetricsArr[1].Metrics + assert.Equal(t, len(metricsArr), 2) + + refinedSqlMetricsArr := removeMetricsFromQueries([]string{"ccp_stat_database_blks_hit", + "ccp_stat_database_blks_read"}, sqlMetricsArr) + assert.Equal(t, len(refinedSqlMetricsArr), 1) + metricsArr = sqlMetricsArr[0].Metrics + assert.Equal(t, len(metricsArr), 1) + }) + +} diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 5c9786459d..d6fc6158e8 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -1220,9 +1220,9 @@ func (r *Reconciler) reconcileInstance( // For now, we are not using logrotate to rotate postgres or patroni logs // but we are using it for pgbackrest logs in the postgres pod - collector.AddToPod(ctx, cluster.Spec.Instrumentation, cluster.Spec.ImagePullPolicy, instanceConfigMap, &instance.Spec.Template.Spec, + collector.AddToPod(ctx, cluster.Spec.Instrumentation, cluster.Spec.ImagePullPolicy, instanceConfigMap, &instance.Spec.Template, []corev1.VolumeMount{postgres.DataVolumeMount()}, pgPassword, - []string{naming.PGBackRestPGDataLogPath}, true) + []string{naming.PGBackRestPGDataLogPath}, true, true) } // Add postgres-exporter to the instance Pod spec diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 49d1f8c8ce..41d1b942a1 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -697,8 +697,8 @@ func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster if postgresCluster.Spec.Instrumentation != nil && feature.Enabled(ctx, feature.OpenTelemetryLogs) { collector.AddToPod(ctx, postgresCluster.Spec.Instrumentation, postgresCluster.Spec.ImagePullPolicy, &corev1.ConfigMap{ObjectMeta: naming.PGBackRestConfig(postgresCluster)}, - &repo.Spec.Template.Spec, []corev1.VolumeMount{}, "", - []string{pgBackRestLogPath}, true) + &repo.Spec.Template, []corev1.VolumeMount{}, "", + []string{pgBackRestLogPath}, true, false) containersToAdd = append(containersToAdd, naming.ContainerCollector) } diff --git a/internal/controller/postgrescluster/pgbouncer.go b/internal/controller/postgrescluster/pgbouncer.go index d5a935bbf3..660572005a 100644 --- a/internal/controller/postgrescluster/pgbouncer.go +++ 
b/internal/controller/postgrescluster/pgbouncer.go @@ -472,7 +472,7 @@ func (r *Reconciler) generatePGBouncerDeployment( err := errors.WithStack(r.setControllerReference(cluster, deploy)) if err == nil { - pgbouncer.Pod(ctx, cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template.Spec) + pgbouncer.Pod(ctx, cluster, configmap, primaryCertificate, secret, &deploy.Spec.Template) } // Add tmp directory and volume for log files diff --git a/internal/controller/standalone_pgadmin/statefulset.go b/internal/controller/standalone_pgadmin/statefulset.go index c75668defc..6e606b0867 100644 --- a/internal/controller/standalone_pgadmin/statefulset.go +++ b/internal/controller/standalone_pgadmin/statefulset.go @@ -134,7 +134,8 @@ func statefulset( } collector.AddToPod(ctx, pgadmin.Spec.Instrumentation, pgadmin.Spec.ImagePullPolicy, - configmap, &sts.Spec.Template.Spec, volumeMounts, "", []string{LogDirectoryAbsolutePath}, false) + configmap, &sts.Spec.Template, volumeMounts, "", []string{LogDirectoryAbsolutePath}, + false, false) } postgrescluster.AddTMPEmptyDir(&sts.Spec.Template) diff --git a/internal/naming/labels.go b/internal/naming/labels.go index 96724fda8b..209af0367b 100644 --- a/internal/naming/labels.go +++ b/internal/naming/labels.go @@ -40,6 +40,10 @@ const ( // LabelMovePGWalDir is used to identify the Job that moves an existing pg_wal directory. LabelMovePGWalDir = labelPrefix + "move-pgwal-dir" + // LabelCollectorDiscovery is added to Pods running the OpenTelemetry "collector" + // container to support discovery by Prometheus + LabelCollectorDiscovery = labelPrefix + "crunchy-otel-collector" + // LabelPGBackRest is used to indicate that a resource is for pgBackRest LabelPGBackRest = labelPrefix + "pgbackrest" diff --git a/internal/pgbouncer/reconcile.go b/internal/pgbouncer/reconcile.go index 66cf1c8df5..b663596ed7 100644 --- a/internal/pgbouncer/reconcile.go +++ b/internal/pgbouncer/reconcile.go @@ -127,7 +127,7 @@ func Pod( inConfigMap *corev1.ConfigMap, inPostgreSQLCertificate *corev1.SecretProjection, inSecret *corev1.Secret, - outPod *corev1.PodSpec, + template *corev1.PodTemplateSpec, ) { if inCluster.Spec.Proxy == nil || inCluster.Spec.Proxy.PGBouncer == nil { // PgBouncer is disabled; there is nothing to do. @@ -196,21 +196,21 @@ func Pod( reloader.Resources = *inCluster.Spec.Proxy.PGBouncer.Sidecars.PGBouncerConfig.Resources } - outPod.Containers = []corev1.Container{container, reloader} + template.Spec.Containers = []corev1.Container{container, reloader} // If the PGBouncerSidecars feature gate is enabled and custom pgBouncer // sidecars are defined, add the defined container to the Pod. if feature.Enabled(ctx, feature.PGBouncerSidecars) && inCluster.Spec.Proxy.PGBouncer.Containers != nil { - outPod.Containers = append(outPod.Containers, inCluster.Spec.Proxy.PGBouncer.Containers...) + template.Spec.Containers = append(template.Spec.Containers, inCluster.Spec.Proxy.PGBouncer.Containers...) 
} - outPod.Volumes = []corev1.Volume{configVolume} + template.Spec.Volumes = []corev1.Volume{configVolume} if feature.Enabled(ctx, feature.OpenTelemetryLogs) || feature.Enabled(ctx, feature.OpenTelemetryMetrics) { collector.AddToPod(ctx, inCluster.Spec.Instrumentation, inCluster.Spec.ImagePullPolicy, inConfigMap, - outPod, []corev1.VolumeMount{configVolumeMount}, string(inSecret.Data["pgbouncer-password"]), []string{naming.PGBouncerLogPath}, - true) + template, []corev1.VolumeMount{configVolumeMount}, string(inSecret.Data["pgbouncer-password"]), + []string{naming.PGBouncerLogPath}, true, true) } } diff --git a/internal/pgbouncer/reconcile_test.go b/internal/pgbouncer/reconcile_test.go index b8c2a2a9fe..dd59a1a337 100644 --- a/internal/pgbouncer/reconcile_test.go +++ b/internal/pgbouncer/reconcile_test.go @@ -148,16 +148,16 @@ func TestPod(t *testing.T) { configMap := new(corev1.ConfigMap) primaryCertificate := new(corev1.SecretProjection) secret := new(corev1.Secret) - pod := new(corev1.PodSpec) + template := new(corev1.PodTemplateSpec) - call := func() { Pod(ctx, cluster, configMap, primaryCertificate, secret, pod) } + call := func() { Pod(ctx, cluster, configMap, primaryCertificate, secret, template) } t.Run("Disabled", func(t *testing.T) { - before := pod.DeepCopy() + before := template.DeepCopy() call() // No change when PgBouncer is not requested in the spec. - assert.DeepEqual(t, before, pod) + assert.DeepEqual(t, before, template) }) t.Run("Defaults", func(t *testing.T) { @@ -167,7 +167,7 @@ func TestPod(t *testing.T) { call() - assert.Assert(t, cmp.MarshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(template.Spec, ` containers: - command: - pgbouncer @@ -256,9 +256,9 @@ volumes: `)) // No change when called again. - before := pod.DeepCopy() + before := template.DeepCopy() call() - assert.DeepEqual(t, before, pod) + assert.DeepEqual(t, before, template) }) t.Run("Customizations", func(t *testing.T) { @@ -277,7 +277,7 @@ volumes: call() - assert.Assert(t, cmp.MarshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(template.Spec, ` containers: - command: - pgbouncer @@ -387,7 +387,7 @@ volumes: call() - assert.Assert(t, cmp.MarshalMatches(pod, ` + assert.Assert(t, cmp.MarshalMatches(template.Spec, ` containers: - command: - pgbouncer @@ -491,7 +491,7 @@ volumes: t.Run("SidecarNotEnabled", func(t *testing.T) { call() - assert.Equal(t, len(pod.Containers), 2, "expected 2 containers in Pod, got %d", len(pod.Containers)) + assert.Equal(t, len(template.Spec.Containers), 2, "expected 2 containers in Pod, got %d", len(template.Spec.Containers)) }) t.Run("SidecarEnabled", func(t *testing.T) { @@ -500,11 +500,11 @@ volumes: })) call() - assert.Equal(t, len(pod.Containers), 3, "expected 3 containers in Pod, got %d", len(pod.Containers)) + assert.Equal(t, len(template.Spec.Containers), 3, "expected 3 containers in Pod, got %d", len(template.Spec.Containers)) var found bool - for i := range pod.Containers { - if pod.Containers[i].Name == "customsidecar1" { + for i := range template.Spec.Containers { + if template.Spec.Containers[i].Name == "customsidecar1" { found = true break } diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types.go index 15eac92d55..e331130ed5 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types.go @@ -8,6 +8,49 @@ import ( corev1 "k8s.io/api/core/v1" ) +// +structType=atomic 
+type OptionalConfigMapKeyRef struct { + ConfigMapKeyRef `json:",inline"` + + // Whether or not the ConfigMap or its data must be defined. Defaults to false. + // +optional + Optional *bool `json:"optional,omitempty"` +} + +// AsProjection returns a copy of this as a [corev1.ConfigMapProjection]. +func (in *OptionalConfigMapKeyRef) AsProjection(path string) corev1.ConfigMapProjection { + out := in.ConfigMapKeyRef.AsProjection(path) + if in.Optional != nil { + v := *in.Optional + out.Optional = &v + } + return out +} + +// +structType=atomic +type ConfigMapKeyRef struct { + // Name of the ConfigMap. + // --- + // https://pkg.go.dev/k8s.io/kubernetes/pkg/apis/core/validation#ValidateConfigMapName + // +required + Name DNS1123Subdomain `json:"name"` + + // Name of the data field within the ConfigMap. + // --- + // https://github.com/kubernetes/kubernetes/blob/v1.32.0/pkg/apis/core/validation/validation.go#L2849 + // https://pkg.go.dev/k8s.io/apimachinery/pkg/util/validation#IsConfigMapKey + // +required + Key ConfigDataKey `json:"key"` +} + +// AsProjection returns a copy of this as a [corev1.ConfigMapProjection]. +func (in *ConfigMapKeyRef) AsProjection(path string) corev1.ConfigMapProjection { + var out corev1.ConfigMapProjection + out.Name = in.Name + out.Items = []corev1.KeyToPath{{Key: in.Key, Path: path}} + return out +} + // +structType=atomic type OptionalSecretKeyRef struct { SecretKeyRef `json:",inline"` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types_test.go index ff74a7a1e7..7ef9bdf0e4 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/config_types_test.go @@ -14,6 +14,71 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func TestOptionalConfigMapKeyRefAsProjection(t *testing.T) { + t.Run("Null", func(t *testing.T) { + in := v1beta1.OptionalConfigMapKeyRef{} + in.Name, in.Key = "one", "two" + + out := in.AsProjection("three") + b, err := yaml.Marshal(out) + assert.NilError(t, err) + assert.DeepEqual(t, string(b), strings.TrimSpace(` +items: +- key: two + path: three +name: one + `)+"\n") + }) + + t.Run("True", func(t *testing.T) { + True := true + in := v1beta1.OptionalConfigMapKeyRef{Optional: &True} + in.Name, in.Key = "one", "two" + + out := in.AsProjection("three") + b, err := yaml.Marshal(out) + assert.NilError(t, err) + assert.DeepEqual(t, string(b), strings.TrimSpace(` +items: +- key: two + path: three +name: one +optional: true + `)+"\n") + }) + + t.Run("False", func(t *testing.T) { + False := false + in := v1beta1.OptionalConfigMapKeyRef{Optional: &False} + in.Name, in.Key = "one", "two" + + out := in.AsProjection("three") + b, err := yaml.Marshal(out) + assert.NilError(t, err) + assert.DeepEqual(t, string(b), strings.TrimSpace(` +items: +- key: two + path: three +name: one +optional: false + `)+"\n") + }) +} + +func TestConfigMapKeyRefAsProjection(t *testing.T) { + in := v1beta1.ConfigMapKeyRef{Name: "asdf", Key: "foobar"} + out := in.AsProjection("some-path") + + b, err := yaml.Marshal(out) + assert.NilError(t, err) + assert.DeepEqual(t, string(b), strings.TrimSpace(` +items: +- key: foobar + path: some-path +name: asdf + `)+"\n") +} + func TestOptionalSecretKeyRefAsProjection(t *testing.T) { t.Run("Null", func(t *testing.T) { in := v1beta1.OptionalSecretKeyRef{} diff --git 
a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/instrumentation_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/instrumentation_types.go index 8c6272d1f1..d3f6882271 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/instrumentation_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/instrumentation_types.go @@ -11,20 +11,29 @@ import corev1 "k8s.io/api/core/v1" type InstrumentationSpec struct { // Image name to use for collector containers. When omitted, the value // comes from an operator environment variable. + // --- // +optional Image string `json:"image,omitempty"` // Resources holds the resource requirements for the collector container. + // --- // +optional Resources corev1.ResourceRequirements `json:"resources,omitempty"` // Config is the place for users to configure exporters and provide files. + // --- // +optional Config *InstrumentationConfigSpec `json:"config,omitempty"` // Logs is the place for users to configure the log collection. + // --- // +optional Logs *InstrumentationLogsSpec `json:"logs,omitempty"` + + // Metrics is the place for users to configure metrics collection. + // --- + // +optional + Metrics *InstrumentationMetricsSpec `json:"metrics,omitempty"` } // InstrumentationConfigSpec allows users to configure their own exporters, @@ -42,6 +51,7 @@ type InstrumentationConfigSpec struct { // Exporters allows users to configure OpenTelemetry exporters that exist // in the collector image. + // --- // +kubebuilder:pruning:PreserveUnknownFields // +kubebuilder:validation:Schemaless // +kubebuilder:validation:Type=object @@ -91,10 +101,70 @@ type InstrumentationLogsSpec struct { RetentionPeriod *Duration `json:"retentionPeriod,omitempty"` } +type InstrumentationMetricsSpec struct { + // Where users can turn off built-in metrics and also provide their own + // custom queries. + // --- + // +optional + CustomQueries *InstrumentationCustomQueriesSpec `json:"customQueries,omitempty"` +} + +type InstrumentationCustomQueriesSpec struct { + // User defined queries and metrics. + // --- + // +optional + Add []InstrumentationCustomQueries `json:"add,omitempty"` + + // A list of built-in queries that should be removed. If all queries for a + // given SQL statement are removed, the SQL statement will no longer be run. + // --- + // +optional + Remove []string `json:"remove,omitempty"` +} + +type InstrumentationCustomQueries struct { + // The name of this batch of queries, which will be used in naming the OTel + // SqlQuery receiver. + // --- + // OTel restricts component names from having whitespace, control characters, + // or symbols. + // https://github.com/open-telemetry/opentelemetry-collector/blob/main/component/identifiable.go#L23-L26 + // +kubebuilder:validation:Pattern=`^[^\pZ\pC\pS]+$` + // + // Set a max length to keep rule costs low. + // +kubebuilder:validation:MaxLength=20 + // + // +required + Name string `json:"name"` + + // A ConfigMap holding the yaml file that contains the queries. + // --- + // +required + Queries ConfigMapKeyRef `json:"queries"` + + // How often the queries should be run. + // --- + // Kubernetes ensures the value is in the "duration" format, but go ahead + // and loosely validate the format to show some acceptable units. + // NOTE: This rejects fractional numbers: https://github.com/kubernetes/kube-openapi/issues/523 + // +kubebuilder:validation:Pattern=`^((PT)?( *[0-9]+ *(?i:(ms|s|m)|(milli|sec|min)s?))+|0)$` + // + // `controller-gen` needs to know "Type=string" to allow a "Pattern". 
+ // +kubebuilder:validation:Type=string + // + // Set a max length to keep rule costs low. + // +kubebuilder:validation:MaxLength=20 + // +kubebuilder:validation:XValidation:rule=`duration("0") <= self && self <= duration("60m")` + // + // +default="5s" + // +optional + CollectionInterval *Duration `json:"collectionInterval,omitempty"` +} + // --- // Configuration for the OpenTelemetry Batch Processor // https://pkg.go.dev/go.opentelemetry.io/collector/processor/batchprocessor#section-readme -// +// --- // The batch processor stops batching when *either* of these is zero, but that is confusing. // Make the user set both so it is evident there is *no* motivation to create any batch. // +kubebuilder:validation:XValidation:rule=`(has(self.minRecords) && self.minRecords == 0) == (has(self.maxDelay) && self.maxDelay == duration('0'))`,message=`to disable batching, both minRecords and maxDelay must be zero` diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index b139390346..189eebdd23 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -118,6 +118,21 @@ func (in *ClusterUpgrade) DeepCopy() *ClusterUpgrade { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ConfigMapKeyRef) DeepCopyInto(out *ConfigMapKeyRef) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ConfigMapKeyRef. +func (in *ConfigMapKeyRef) DeepCopy() *ConfigMapKeyRef { + if in == nil { + return nil + } + out := new(ConfigMapKeyRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *CrunchyBridgeCluster) DeepCopyInto(out *CrunchyBridgeCluster) { *out = *in @@ -457,6 +472,54 @@ func (in *InstrumentationConfigSpec) DeepCopy() *InstrumentationConfigSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstrumentationCustomQueries) DeepCopyInto(out *InstrumentationCustomQueries) { + *out = *in + in.Queries.DeepCopyInto(&out.Queries) + if in.CollectionInterval != nil { + in, out := &in.CollectionInterval, &out.CollectionInterval + *out = new(Duration) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstrumentationCustomQueries. +func (in *InstrumentationCustomQueries) DeepCopy() *InstrumentationCustomQueries { + if in == nil { + return nil + } + out := new(InstrumentationCustomQueries) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstrumentationCustomQueriesSpec) DeepCopyInto(out *InstrumentationCustomQueriesSpec) { + *out = *in + if in.Add != nil { + in, out := &in.Add, &out.Add + *out = make([]InstrumentationCustomQueries, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + if in.Remove != nil { + in, out := &in.Remove, &out.Remove + *out = make([]string, len(*in)) + copy(*out, *in) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstrumentationCustomQueriesSpec. 
+func (in *InstrumentationCustomQueriesSpec) DeepCopy() *InstrumentationCustomQueriesSpec { + if in == nil { + return nil + } + out := new(InstrumentationCustomQueriesSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstrumentationLogsSpec) DeepCopyInto(out *InstrumentationLogsSpec) { *out = *in @@ -487,6 +550,26 @@ func (in *InstrumentationLogsSpec) DeepCopy() *InstrumentationLogsSpec { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *InstrumentationMetricsSpec) DeepCopyInto(out *InstrumentationMetricsSpec) { + *out = *in + if in.CustomQueries != nil { + in, out := &in.CustomQueries, &out.CustomQueries + *out = new(InstrumentationCustomQueriesSpec) + (*in).DeepCopyInto(*out) + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstrumentationMetricsSpec. +func (in *InstrumentationMetricsSpec) DeepCopy() *InstrumentationMetricsSpec { + if in == nil { + return nil + } + out := new(InstrumentationMetricsSpec) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *InstrumentationSpec) DeepCopyInto(out *InstrumentationSpec) { *out = *in @@ -501,6 +584,11 @@ func (in *InstrumentationSpec) DeepCopyInto(out *InstrumentationSpec) { *out = new(InstrumentationLogsSpec) (*in).DeepCopyInto(*out) } + if in.Metrics != nil { + in, out := &in.Metrics, &out.Metrics + *out = new(InstrumentationMetricsSpec) + (*in).DeepCopyInto(*out) + } } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new InstrumentationSpec. @@ -629,6 +717,27 @@ func (in *OpenTelemetryResourceDetector) DeepCopy() *OpenTelemetryResourceDetect return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *OptionalConfigMapKeyRef) DeepCopyInto(out *OptionalConfigMapKeyRef) { + *out = *in + in.ConfigMapKeyRef.DeepCopyInto(&out.ConfigMapKeyRef) + if in.Optional != nil { + in, out := &in.Optional, &out.Optional + *out = new(bool) + **out = **in + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new OptionalConfigMapKeyRef. +func (in *OptionalConfigMapKeyRef) DeepCopy() *OptionalConfigMapKeyRef { + if in == nil { + return nil + } + out := new(OptionalConfigMapKeyRef) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. func (in *OptionalSecretKeyRef) DeepCopyInto(out *OptionalSecretKeyRef) { *out = *in
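# Example usage (sketch, not part of the diff above): how the new
# spec.instrumentation.metrics.customQueries API introduced by this change
# might be exercised. The cluster name, ConfigMap name, data key, SQL, and
# metric name below are hypothetical; other required PostgresCluster fields
# are omitted. The queries file follows the OpenTelemetry sqlquery receiver
# format, since the operator mounts it at
# /etc/otel-collector/<name>/<key> and passes it straight to that
# receiver's `queries` setting.
apiVersion: v1
kind: ConfigMap
metadata:
  name: my-custom-queries            # hypothetical ConfigMap name
data:
  queries.yaml: |
    - sql: "SELECT count(*) AS idle FROM pg_stat_activity WHERE state = 'idle'"
      metrics:
        - metric_name: ccp_custom_idle_connections   # hypothetical metric
          value_column: idle
          static_attributes:
            server: "localhost:5432"
---
apiVersion: postgres-operator.crunchydata.com/v1beta1
kind: PostgresCluster
metadata:
  name: hippo                        # hypothetical cluster name
spec:
  instrumentation:
    metrics:
      customQueries:
        add:
          - name: activity           # <= 20 chars, no whitespace or symbols
            queries:
              name: my-custom-queries  # ConfigMap holding the queries file
              key: queries.yaml
            collectionInterval: 30s  # defaults to 5s when omitted
        remove:
          - ccp_database_size_bytes  # drop a built-in metric by metric_name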