From 35b110f04424bcebbb585cbde9002c83eafcd9eb Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 26 Feb 2025 13:42:04 -0600 Subject: [PATCH 1/2] Change PostgresCluster.spec.config to a pointer This field is optional, but the struct always includes it in YAML output. --- internal/config/config.go | 8 +- internal/patroni/config.go | 6 +- internal/pgbackrest/reconcile.go | 2 +- internal/pgbackrest/reconcile_test.go | 6 +- internal/postgres/reconcile.go | 2 +- internal/postgres/reconcile_test.go | 13 +-- internal/testing/require/errors.go | 33 +++++++ internal/testing/validation/pgadmin_test.go | 3 +- .../validation/postgrescluster_test.go | 88 +++++++++---------- .../v1beta1/postgrescluster_types.go | 5 +- ..._test.go => postgrescluster_types_test.go} | 2 - .../v1beta1/zz_generated.deepcopy.go | 6 +- 12 files changed, 102 insertions(+), 72 deletions(-) create mode 100644 internal/testing/require/errors.go rename pkg/apis/postgres-operator.crunchydata.com/v1beta1/{postgrescluster_test.go => postgrescluster_types_test.go} (99%) diff --git a/internal/config/config.go b/internal/config/config.go index 5f2e12a9f8..cc72b921ed 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -22,9 +22,11 @@ func defaultFromEnv(value, key string) string { // FetchKeyCommand returns the fetch_key_cmd value stored in the encryption_key_command // variable used to enable TDE. 
func FetchKeyCommand(spec *v1beta1.PostgresClusterSpec) string { - if parameters := spec.Config.Parameters; parameters != nil { - if v, ok := parameters["encryption_key_command"]; ok { - return v.String() + if config := spec.Config; config != nil { + if parameters := config.Parameters; parameters != nil { + if v, ok := parameters["encryption_key_command"]; ok { + return v.String() + } } } diff --git a/internal/patroni/config.go b/internal/patroni/config.go index 48c1ec399e..52cf8e5e9e 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -255,8 +255,10 @@ func DynamicConfiguration( } } // Copy spec.config.parameters over spec.patroni...parameters. - for k, v := range spec.Config.Parameters { - parameters[k] = v + if spec.Config != nil { + for k, v := range spec.Config.Parameters { + parameters[k] = v + } } // Override all of the above with mandatory parameters. if pgParameters.Mandatory != nil { diff --git a/internal/pgbackrest/reconcile.go b/internal/pgbackrest/reconcile.go index 89768e6857..4e789d137e 100644 --- a/internal/pgbackrest/reconcile.go +++ b/internal/pgbackrest/reconcile.go @@ -213,7 +213,7 @@ func AddConfigToRestorePod( } // mount any provided configuration files to the restore Job Pod - if len(cluster.Spec.Config.Files) != 0 { + if cluster.Spec.Config != nil && len(cluster.Spec.Config.Files) != 0 { additionalConfigVolumeMount := postgres.ConfigVolumeMount() additionalConfigVolume := corev1.Volume{Name: additionalConfigVolumeMount.Name} additionalConfigVolume.Projected = &corev1.ProjectedVolumeSource{ diff --git a/internal/pgbackrest/reconcile_test.go b/internal/pgbackrest/reconcile_test.go index b3c50b1f8e..0c9aece2b1 100644 --- a/internal/pgbackrest/reconcile_test.go +++ b/internal/pgbackrest/reconcile_test.go @@ -522,8 +522,10 @@ func TestAddConfigToRestorePod(t *testing.T) { custom.Name = "custom-configmap-files" cluster := cluster.DeepCopy() - cluster.Spec.Config.Files = []corev1.VolumeProjection{ - {ConfigMap: &custom}, + 
cluster.Spec.Config = &v1beta1.PostgresConfig{ + Files: []corev1.VolumeProjection{ + {ConfigMap: &custom}, + }, } sourceCluster := cluster.DeepCopy() diff --git a/internal/postgres/reconcile.go b/internal/postgres/reconcile.go index aefd5715e8..fda5229792 100644 --- a/internal/postgres/reconcile.go +++ b/internal/postgres/reconcile.go @@ -232,7 +232,7 @@ func InstancePod(ctx context.Context, startup.VolumeMounts = append(startup.VolumeMounts, tablespaceVolumeMount) } - if len(inCluster.Spec.Config.Files) != 0 { + if inCluster.Spec.Config != nil && len(inCluster.Spec.Config.Files) != 0 { additionalConfigVolumeMount := ConfigVolumeMount() additionalConfigVolume := corev1.Volume{Name: additionalConfigVolumeMount.Name} additionalConfigVolume.Projected = &corev1.ProjectedVolumeSource{ diff --git a/internal/postgres/reconcile_test.go b/internal/postgres/reconcile_test.go index 3898f28512..73fabd3014 100644 --- a/internal/postgres/reconcile_test.go +++ b/internal/postgres/reconcile_test.go @@ -16,6 +16,7 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" + "github.com/crunchydata/postgres-operator/internal/testing/require" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -480,15 +481,9 @@ volumes: t.Run("WithAdditionalConfigFiles", func(t *testing.T) { clusterWithConfig := cluster.DeepCopy() - clusterWithConfig.Spec.Config.Files = []corev1.VolumeProjection{ - { - Secret: &corev1.SecretProjection{ - LocalObjectReference: corev1.LocalObjectReference{ - Name: "keytab", - }, - }, - }, - } + require.UnmarshalInto(t, &clusterWithConfig.Spec.Config, `{ + files: [{ secret: { name: keytab } }], + }`) pod := new(corev1.PodSpec) InstancePod(ctx, clusterWithConfig, instance, diff --git a/internal/testing/require/errors.go b/internal/testing/require/errors.go new file mode 100644 index 
0000000000..128a0397b0 --- /dev/null +++ b/internal/testing/require/errors.go @@ -0,0 +1,33 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package require + +import ( + "errors" + "testing" + + "gotest.tools/v3/assert" + apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// StatusError returns the [metav1.Status] within err's tree. +// It calls t.Fatal when err is nil or there is no status. +func StatusError(t testing.TB, err error) metav1.Status { + status, ok := err.(apierrors.APIStatus) + + assert.Assert(t, ok || errors.As(err, &status), + "%T does not implement %T", err, status) + + return status.Status() +} + +// Value returns v or panics when err is not nil. +func Value[T any](v T, err error) T { + if err != nil { + panic(err) + } + return v +} diff --git a/internal/testing/validation/pgadmin_test.go b/internal/testing/validation/pgadmin_test.go index aa5cdb42e1..e8bd72705c 100644 --- a/internal/testing/validation/pgadmin_test.go +++ b/internal/testing/validation/pgadmin_test.go @@ -46,8 +46,7 @@ func TestPGAdminInstrumentation(t *testing.T) { assert.ErrorContains(t, err, "hour|day|week") assert.ErrorContains(t, err, "one hour") - //nolint:errorlint // This is a test, and a panic is unlikely. 
- status := err.(apierrors.APIStatus).Status() + status := require.StatusError(t, err) assert.Assert(t, status.Details != nil) assert.Assert(t, cmp.Len(status.Details.Causes, 2)) diff --git a/internal/testing/validation/postgrescluster_test.go b/internal/testing/validation/postgrescluster_test.go index 30b6cff373..5c8bd9f0e3 100644 --- a/internal/testing/validation/postgrescluster_test.go +++ b/internal/testing/validation/postgrescluster_test.go @@ -60,8 +60,7 @@ func TestPostgresConfigParameters(t *testing.T) { {"archive_timeout", "20s"}, } { t.Run(tt.key, func(t *testing.T) { - cluster, err := runtime.ToUnstructuredObject(base) - assert.NilError(t, err) + cluster := require.Value(runtime.ToUnstructuredObject(base)) assert.NilError(t, unstructured.SetNestedField(cluster.Object, tt.value, "spec", "config", "parameters", tt.key)) @@ -89,16 +88,14 @@ func TestPostgresConfigParameters(t *testing.T) { {key: "wal_log_hints", value: "off"}, } { t.Run(tt.key, func(t *testing.T) { - cluster, err := runtime.ToUnstructuredObject(base) - assert.NilError(t, err) + cluster := require.Value(runtime.ToUnstructuredObject(base)) assert.NilError(t, unstructured.SetNestedField(cluster.Object, tt.value, "spec", "config", "parameters", tt.key)) - err = cc.Create(ctx, cluster, client.DryRunAll) + err := cc.Create(ctx, cluster, client.DryRunAll) assert.Assert(t, apierrors.IsInvalid(err)) - //nolint:errorlint // This is a test, and a panic is unlikely. 
- status := err.(apierrors.APIStatus).Status() + status := require.StatusError(t, err) assert.Assert(t, status.Details != nil) assert.Assert(t, cmp.Len(status.Details.Causes, 1)) @@ -112,18 +109,17 @@ func TestPostgresConfigParameters(t *testing.T) { t.Run("NoConnections", func(t *testing.T) { for _, tt := range []struct { key string - value intstr.IntOrString + value any }{ - {key: "ssl", value: intstr.FromString("off")}, - {key: "ssl_ca_file", value: intstr.FromString("")}, - {key: "unix_socket_directories", value: intstr.FromString("one")}, - {key: "unix_socket_group", value: intstr.FromString("two")}, + {key: "ssl", value: "off"}, + {key: "ssl_ca_file", value: ""}, + {key: "unix_socket_directories", value: "one"}, + {key: "unix_socket_group", value: "two"}, } { t.Run(tt.key, func(t *testing.T) { - cluster := base.DeepCopy() - cluster.Spec.Config.Parameters = map[string]intstr.IntOrString{ - tt.key: tt.value, - } + cluster := require.Value(runtime.ToUnstructuredObject(base)) + assert.NilError(t, unstructured.SetNestedField(cluster.Object, + tt.value, "spec", "config", "parameters", tt.key)) err := cc.Create(ctx, cluster, client.DryRunAll) assert.Assert(t, apierrors.IsInvalid(err)) @@ -134,19 +130,18 @@ func TestPostgresConfigParameters(t *testing.T) { t.Run("NoWriteAheadLog", func(t *testing.T) { for _, tt := range []struct { key string - value intstr.IntOrString + value any }{ - {key: "archive_mode", value: intstr.FromString("off")}, - {key: "archive_command", value: intstr.FromString("true")}, - {key: "restore_command", value: intstr.FromString("true")}, - {key: "recovery_target", value: intstr.FromString("immediate")}, - {key: "recovery_target_name", value: intstr.FromString("doot")}, + {key: "archive_mode", value: "off"}, + {key: "archive_command", value: "true"}, + {key: "restore_command", value: "true"}, + {key: "recovery_target", value: "immediate"}, + {key: "recovery_target_name", value: "doot"}, } { t.Run(tt.key, func(t *testing.T) { - cluster := 
base.DeepCopy() - cluster.Spec.Config.Parameters = map[string]intstr.IntOrString{ - tt.key: tt.value, - } + cluster := require.Value(runtime.ToUnstructuredObject(base)) + assert.NilError(t, unstructured.SetNestedField(cluster.Object, + tt.value, "spec", "config", "parameters", tt.key)) err := cc.Create(ctx, cluster, client.DryRunAll) assert.Assert(t, apierrors.IsInvalid(err)) @@ -158,8 +153,10 @@ func TestPostgresConfigParameters(t *testing.T) { t.Run("Valid", func(t *testing.T) { cluster := base.DeepCopy() - cluster.Spec.Config.Parameters = map[string]intstr.IntOrString{ - "wal_level": intstr.FromString("logical"), + cluster.Spec.Config = &v1beta1.PostgresConfig{ + Parameters: map[string]intstr.IntOrString{ + "wal_level": intstr.FromString("logical"), + }, } assert.NilError(t, cc.Create(ctx, cluster, client.DryRunAll)) }) @@ -167,16 +164,17 @@ func TestPostgresConfigParameters(t *testing.T) { t.Run("Invalid", func(t *testing.T) { cluster := base.DeepCopy() - cluster.Spec.Config.Parameters = map[string]intstr.IntOrString{ - "wal_level": intstr.FromString("minimal"), + cluster.Spec.Config = &v1beta1.PostgresConfig{ + Parameters: map[string]intstr.IntOrString{ + "wal_level": intstr.FromString("minimal"), + }, } err := cc.Create(ctx, cluster, client.DryRunAll) assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, `"replica" or higher`) - //nolint:errorlint // This is a test, and a panic is unlikely. 
- status := err.(apierrors.APIStatus).Status() + status := require.StatusError(t, err) assert.Assert(t, status.Details != nil) assert.Assert(t, cmp.Len(status.Details.Causes, 1)) assert.Equal(t, status.Details.Causes[0].Field, "spec.config.parameters") @@ -187,18 +185,17 @@ func TestPostgresConfigParameters(t *testing.T) { t.Run("NoReplication", func(t *testing.T) { for _, tt := range []struct { key string - value intstr.IntOrString + value any }{ - {key: "synchronous_standby_names", value: intstr.FromString("")}, - {key: "primary_conninfo", value: intstr.FromString("")}, - {key: "primary_slot_name", value: intstr.FromString("")}, - {key: "recovery_min_apply_delay", value: intstr.FromString("")}, + {key: "synchronous_standby_names", value: ""}, + {key: "primary_conninfo", value: ""}, + {key: "primary_slot_name", value: ""}, + {key: "recovery_min_apply_delay", value: ""}, } { t.Run(tt.key, func(t *testing.T) { - cluster := base.DeepCopy() - cluster.Spec.Config.Parameters = map[string]intstr.IntOrString{ - tt.key: tt.value, - } + cluster := require.Value(runtime.ToUnstructuredObject(base)) + assert.NilError(t, unstructured.SetNestedField(cluster.Object, + tt.value, "spec", "config", "parameters", tt.key)) err := cc.Create(ctx, cluster, client.DryRunAll) assert.Assert(t, apierrors.IsInvalid(err)) @@ -251,8 +248,7 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "cannot contain comments") - //nolint:errorlint // This is a test, and a panic is unlikely. - status := err.(apierrors.APIStatus).Status() + status := require.StatusError(t, err) assert.Assert(t, status.Details != nil) assert.Assert(t, cmp.Len(status.Details.Causes, 3)) @@ -273,8 +269,7 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "cannot assign password") - //nolint:errorlint // This is a test, and a panic is unlikely. 
- status := err.(apierrors.APIStatus).Status() + status := require.StatusError(t, err) assert.Assert(t, status.Details != nil) assert.Assert(t, cmp.Len(status.Details.Causes, 2)) @@ -294,8 +289,7 @@ func TestPostgresUserOptions(t *testing.T) { assert.Assert(t, apierrors.IsInvalid(err)) assert.ErrorContains(t, err, "should match") - //nolint:errorlint // This is a test, and a panic is unlikely. - status := err.(apierrors.APIStatus).Status() + status := require.StatusError(t, err) assert.Assert(t, status.Details != nil) assert.Assert(t, cmp.Len(status.Details.Causes, 1)) assert.Equal(t, status.Details.Causes[0].Field, "spec.users[0].options") diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go index e6b75bddae..9f661b0640 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types.go @@ -25,6 +25,9 @@ type PostgresClusterSpec struct { // +optional Backups Backups `json:"backups,omitempty"` + // +optional + Config *PostgresConfig `json:"config,omitempty"` + // The secret containing the Certificates and Keys to encrypt PostgreSQL // traffic will need to contain the server TLS certificate, TLS key and the // Certificate Authority certificate with the data keys set to tls.crt, @@ -188,8 +191,6 @@ type PostgresClusterSpec struct { // +kubebuilder:validation:MaxItems=64 // +optional Users []PostgresUserSpec `json:"users,omitempty"` - - Config PostgresConfig `json:"config,omitempty"` } // DataSource defines data sources for a new PostgresCluster. 
diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types_test.go similarity index 99% rename from pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go rename to pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types_test.go index 099418b494..356e8665a6 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_test.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/postgrescluster_types_test.go @@ -42,7 +42,6 @@ spec: backups: pgbackrest: repos: null - config: {} instances: null patroni: leaderLeaseDurationSeconds: 30 @@ -75,7 +74,6 @@ spec: backups: pgbackrest: repos: null - config: {} instances: - dataVolumeClaimSpec: resources: {} diff --git a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go index acca4b1f47..86f3fcb34f 100644 --- a/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go +++ b/pkg/apis/postgres-operator.crunchydata.com/v1beta1/zz_generated.deepcopy.go @@ -1801,6 +1801,11 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { (*in).DeepCopyInto(*out) } in.Backups.DeepCopyInto(&out.Backups) + if in.Config != nil { + in, out := &in.Config, &out.Config + *out = new(PostgresConfig) + (*in).DeepCopyInto(*out) + } if in.CustomTLSSecret != nil { in, out := &in.CustomTLSSecret, &out.CustomTLSSecret *out = new(corev1.SecretProjection) @@ -1905,7 +1910,6 @@ func (in *PostgresClusterSpec) DeepCopyInto(out *PostgresClusterSpec) { (*in)[i].DeepCopyInto(&(*out)[i]) } } - in.Config.DeepCopyInto(&out.Config) } // DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PostgresClusterSpec. 
From b1de990eb0d28bff119a0c793169c6942e1faa04 Mon Sep 17 00:00:00 2001 From: Chris Bandy Date: Wed, 26 Feb 2025 12:14:48 -0600 Subject: [PATCH 2/2] Calculate Postgres parameters in the controller The controller assigned mandatory and default values but did nothing with values defined in the spec. The patroni.DynamicConfiguration function was interpreting its schemaless fields while also combining Postgres parameters from elsewhere. Its tests were long and complicated. 1. Postgres parameters are now extracted from the schemaless Patroni field in their own function with its own tests. Unusual types are handled more deliberately. 2. The PostgresCluster controller now creates a single set of Postgres parameters based on all the fields of the PostgresCluster spec. 3. The DynamicConfiguration function is simpler (44 lines, 30% smaller) and easier to test. --- internal/collector/postgres.go | 24 +- internal/collector/postgres_test.go | 8 +- .../controller/postgrescluster/cluster.go | 2 +- .../controller/postgrescluster/controller.go | 12 +- .../controller/postgrescluster/patroni.go | 2 +- .../controller/postgrescluster/postgres.go | 65 ++++ .../postgrescluster/postgres_test.go | 117 +++++++ internal/patroni/config.go | 59 +--- internal/patroni/config_test.go | 295 ++---------------- internal/patroni/postgres.go | 56 ++++ internal/patroni/postgres_test.go | 112 +++++++ internal/patroni/reconcile.go | 2 +- internal/patroni/reconcile_test.go | 2 +- internal/postgres/parameters.go | 17 +- internal/postgres/parameters_test.go | 10 + internal/testing/cmp/cmp.go | 9 + internal/testing/require/encoding_test.go | 1 + 17 files changed, 439 insertions(+), 354 deletions(-) create mode 100644 internal/patroni/postgres.go create mode 100644 internal/patroni/postgres_test.go diff --git a/internal/collector/postgres.go b/internal/collector/postgres.go index 416c27ecda..8e88cf1b33 100644 --- a/internal/collector/postgres.go +++ b/internal/collector/postgres.go @@ -19,7 +19,7 @@ import ( 
func NewConfigForPostgresPod(ctx context.Context, inCluster *v1beta1.PostgresCluster, - outParameters *postgres.Parameters, + outParameters *postgres.ParameterSet, ) *Config { config := NewConfig(inCluster.Spec.Instrumentation) @@ -72,22 +72,22 @@ func EnablePostgresLogging( ctx context.Context, inCluster *v1beta1.PostgresCluster, outConfig *Config, - outParameters *postgres.Parameters, + outParameters *postgres.ParameterSet, ) { if feature.Enabled(ctx, feature.OpenTelemetryLogs) { directory := postgres.LogDirectory() // https://www.postgresql.org/docs/current/runtime-config-logging.html - outParameters.Mandatory.Add("logging_collector", "on") - outParameters.Mandatory.Add("log_directory", directory) + outParameters.Add("logging_collector", "on") + outParameters.Add("log_directory", directory) // PostgreSQL v8.3 adds support for CSV logging, and // PostgreSQL v15 adds support for JSON logging. The latter is preferred // because newlines are escaped as "\n", U+005C + U+006E. if inCluster.Spec.PostgresVersion < 15 { - outParameters.Mandatory.Add("log_destination", "csvlog") + outParameters.Add("log_destination", "csvlog") } else { - outParameters.Mandatory.Add("log_destination", "jsonlog") + outParameters.Add("log_destination", "jsonlog") } // Keep seven days of logs named for the day of the week; @@ -100,14 +100,14 @@ func EnablePostgresLogging( // probably requires another process that deletes the oldest files. // // The ".log" suffix is replaced by ".json" for JSON log files. 
- outParameters.Mandatory.Add("log_filename", "postgresql-%a.log") - outParameters.Mandatory.Add("log_file_mode", "0660") - outParameters.Mandatory.Add("log_rotation_age", "1d") - outParameters.Mandatory.Add("log_rotation_size", "0") - outParameters.Mandatory.Add("log_truncate_on_rotation", "on") + outParameters.Add("log_filename", "postgresql-%a.log") + outParameters.Add("log_file_mode", "0660") + outParameters.Add("log_rotation_age", "1d") + outParameters.Add("log_rotation_size", "0") + outParameters.Add("log_truncate_on_rotation", "on") // Log in a timezone that the OpenTelemetry Collector will understand. - outParameters.Mandatory.Add("log_timezone", "UTC") + outParameters.Add("log_timezone", "UTC") // Keep track of what log records and files have been processed. // Use a subdirectory of the logs directory to stay within the same failure domain. diff --git a/internal/collector/postgres_test.go b/internal/collector/postgres_test.go index bba986ac41..9c55757fbd 100644 --- a/internal/collector/postgres_test.go +++ b/internal/collector/postgres_test.go @@ -27,9 +27,9 @@ func TestEnablePostgresLogging(t *testing.T) { cluster.Spec.PostgresVersion = 99 config := NewConfig(nil) - params := postgres.NewParameters() + params := postgres.NewParameterSet() - EnablePostgresLogging(ctx, cluster, config, ¶ms) + EnablePostgresLogging(ctx, cluster, config, params) result, err := config.ToYAML() assert.NilError(t, err) @@ -255,9 +255,9 @@ service: cluster.Spec.Instrumentation = testInstrumentationSpec() config := NewConfig(cluster.Spec.Instrumentation) - params := postgres.NewParameters() + params := postgres.NewParameterSet() - EnablePostgresLogging(ctx, cluster, config, ¶ms) + EnablePostgresLogging(ctx, cluster, config, params) result, err := config.ToYAML() assert.NilError(t, err) diff --git a/internal/controller/postgrescluster/cluster.go b/internal/controller/postgrescluster/cluster.go index 5cd515f5a6..4cd62f60c8 100644 --- a/internal/controller/postgrescluster/cluster.go 
+++ b/internal/controller/postgrescluster/cluster.go @@ -30,7 +30,7 @@ import ( // files (etc) that apply to the entire cluster. func (r *Reconciler) reconcileClusterConfigMap( ctx context.Context, cluster *v1beta1.PostgresCluster, - pgHBAs postgres.HBAs, pgParameters postgres.Parameters, + pgHBAs postgres.HBAs, pgParameters *postgres.ParameterSet, ) (*corev1.ConfigMap, error) { clusterConfigMap := &corev1.ConfigMap{ObjectMeta: naming.ClusterConfigMap(cluster)} clusterConfigMap.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap")) diff --git a/internal/controller/postgrescluster/controller.go b/internal/controller/postgrescluster/controller.go index c200fa0e27..4de285e559 100644 --- a/internal/controller/postgrescluster/controller.go +++ b/internal/controller/postgrescluster/controller.go @@ -33,8 +33,6 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/kubernetes" "github.com/crunchydata/postgres-operator/internal/logging" - "github.com/crunchydata/postgres-operator/internal/pgaudit" - "github.com/crunchydata/postgres-operator/internal/pgbackrest" "github.com/crunchydata/postgres-operator/internal/pgbouncer" "github.com/crunchydata/postgres-operator/internal/pgmonitor" "github.com/crunchydata/postgres-operator/internal/pki" @@ -237,15 +235,9 @@ func (r *Reconciler) Reconcile( pgmonitor.PostgreSQLHBAs(ctx, cluster, &pgHBAs) pgbouncer.PostgreSQL(cluster, &pgHBAs) - pgParameters := postgres.NewParameters() - pgaudit.PostgreSQLParameters(&pgParameters) - pgbackrest.PostgreSQL(cluster, &pgParameters, backupsSpecFound) - pgmonitor.PostgreSQLParameters(ctx, cluster, &pgParameters) + pgParameters := r.generatePostgresParameters(ctx, cluster, backupsSpecFound) - otelConfig := collector.NewConfigForPostgresPod(ctx, cluster, &pgParameters) - - // Set huge_pages = try if a hugepages resource limit > 0, otherwise set "off" - postgres.SetHugePages(cluster, &pgParameters) + otelConfig := 
collector.NewConfigForPostgresPod(ctx, cluster, pgParameters) if err == nil { rootCA, err = r.reconcileRootCertificate(ctx, cluster) diff --git a/internal/controller/postgrescluster/patroni.go b/internal/controller/postgrescluster/patroni.go index 995de75b61..5242169be6 100644 --- a/internal/controller/postgrescluster/patroni.go +++ b/internal/controller/postgrescluster/patroni.go @@ -173,7 +173,7 @@ func (r *Reconciler) reconcilePatroniDistributedConfiguration( func (r *Reconciler) reconcilePatroniDynamicConfiguration( ctx context.Context, cluster *v1beta1.PostgresCluster, instances *observedInstances, - pgHBAs postgres.HBAs, pgParameters postgres.Parameters, + pgHBAs postgres.HBAs, pgParameters *postgres.ParameterSet, ) error { if !patroni.ClusterBootstrapped(cluster) { // Patroni has not yet bootstrapped. Dynamic configuration happens through diff --git a/internal/controller/postgrescluster/postgres.go b/internal/controller/postgrescluster/postgres.go index 0806445586..25ffeefc99 100644 --- a/internal/controller/postgrescluster/postgres.go +++ b/internal/controller/postgrescluster/postgres.go @@ -6,6 +6,7 @@ package postgrescluster import ( "bytes" + "cmp" "context" "fmt" "io" @@ -29,7 +30,10 @@ import ( "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" "github.com/crunchydata/postgres-operator/internal/naming" + "github.com/crunchydata/postgres-operator/internal/patroni" "github.com/crunchydata/postgres-operator/internal/pgaudit" + "github.com/crunchydata/postgres-operator/internal/pgbackrest" + "github.com/crunchydata/postgres-operator/internal/pgmonitor" "github.com/crunchydata/postgres-operator/internal/postgis" "github.com/crunchydata/postgres-operator/internal/postgres" pgpassword "github.com/crunchydata/postgres-operator/internal/postgres/password" @@ -37,6 +41,67 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +// 
generatePostgresParameters produces the parameter set for cluster that +// incorporates, from highest to lowest precedence: +// 1. mandatory values determined by controllers +// 2. parameters in cluster.spec.config.parameters +// 3. parameters in cluster.spec.patroni.dynamicConfiguration +// 4. default values determined by controllers +func (*Reconciler) generatePostgresParameters( + ctx context.Context, cluster *v1beta1.PostgresCluster, backupsSpecFound bool, +) *postgres.ParameterSet { + builtin := postgres.NewParameters() + pgaudit.PostgreSQLParameters(&builtin) + pgbackrest.PostgreSQL(cluster, &builtin, backupsSpecFound) + pgmonitor.PostgreSQLParameters(ctx, cluster, &builtin) + postgres.SetHugePages(cluster, &builtin) + + // Last write wins, so start with the recommended defaults. + result := cmp.Or(builtin.Default.DeepCopy(), postgres.NewParameterSet()) + + // Overwrite the above with any parameters specified in the Patroni section. + for k, v := range patroni.PostgresParameters(cluster.Spec.Patroni).AsMap() { + result.Add(k, v) + } + + // Overwrite the above with any parameters specified in the Config section. + if config := cluster.Spec.Config; config != nil { + for k, v := range config.Parameters { + result.Add(k, v.String()) + } + } + + // Overwrite the above with mandatory values. + if builtin.Mandatory != nil { + // This parameter is a comma-separated list. Rather than overwrite the + // user-defined value, we want to combine it with the mandatory one. + preload := result.Value("shared_preload_libraries") + + for k, v := range builtin.Mandatory.AsMap() { + // Load mandatory libraries ahead of user-defined libraries. + if k == "shared_preload_libraries" && len(v) > 0 && len(preload) > 0 { + v = v + "," + preload + } + + result.Add(k, v) + } + } + + // Some preload libraries belong at specific positions in this list. + if preload, ok := result.Get("shared_preload_libraries"); ok { + // Load "citus" ahead of any other libraries.
+ // - https://github.com/citusdata/citus/blob/v12.0.0/src/backend/distributed/shared_library_init.c#L417-L419 + // - https://github.com/citusdata/citus/blob/v13.0.0/src/backend/distributed/shared_library_init.c#L420-L422 + if strings.Contains(preload, "citus") { + preload = "citus," + preload + } + + result.Add("shared_preload_libraries", preload) + } + + return result +} + // generatePostgresUserSecret returns a Secret containing a password and // connection details for the first database in spec. When existing is nil or // lacks a password or verifier, a new password and verifier are generated. diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index c14e68851b..f6da644a09 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -34,6 +34,123 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) +func TestGeneratePostgresParameters(t *testing.T) { + ctx := context.Background() + reconciler := &Reconciler{} + + builtin := reconciler.generatePostgresParameters(ctx, v1beta1.NewPostgresCluster(), false) + assert.Assert(t, len(builtin.AsMap()) > 0, + "expected an empty cluster to have some builtin parameters") + + assert.Equal(t, builtin.Value("jit"), "off", + "BUG IN TEST: expected JIT to be disabled") + + assert.Equal(t, builtin.Value("shared_preload_libraries"), "pgaudit", + "BUG IN TEST: expected pgAudit to be mandatory") + + t.Run("Config", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + require.UnmarshalInto(t, &cluster.Spec.Config, `{ + parameters: { + something: str, + another: 5, + }, + }`) + + result := reconciler.generatePostgresParameters(ctx, cluster, false) + assert.Assert(t, cmp.LenMap(result.AsMap(), len(builtin.AsMap())+2), + "expected two parameters from the Config section") + + assert.Equal(t, result.Value("another"), "5") + assert.Equal(t, 
result.Value("something"), "str") + }) + + t.Run("Patroni", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + require.UnmarshalInto(t, &cluster.Spec.Patroni, `{ + dynamicConfiguration: { + postgresql: { parameters: { + something: str, + another: 5.1, + } }, + }, + }`) + + result := reconciler.generatePostgresParameters(ctx, cluster, false) + assert.Assert(t, cmp.LenMap(result.AsMap(), len(builtin.AsMap())+2), + "expected two parameters from the Patroni section") + + assert.Equal(t, result.Value("another"), "5.1") + assert.Equal(t, result.Value("something"), "str") + }) + + t.Run("Precedence", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + require.UnmarshalInto(t, &cluster.Spec.Config, `{ + parameters: { + something: replaced, + unrelated: used, + jit: "on", + }, + }`) + require.UnmarshalInto(t, &cluster.Spec.Patroni, `{ + dynamicConfiguration: { + postgresql: { parameters: { + something: str, + another: 5.1, + } }, + }, + }`) + + result := reconciler.generatePostgresParameters(ctx, cluster, false) + assert.Assert(t, cmp.LenMap(result.AsMap(), len(builtin.AsMap())+3+1-1), + "expected three parameters from the Config section,"+ + "plus one from the Patroni section, minus one default") + + assert.Equal(t, result.Value("another"), "5.1") // Patroni + assert.Equal(t, result.Value("something"), "replaced") // Config + assert.Equal(t, result.Value("unrelated"), "used") // Config + assert.Equal(t, result.Value("jit"), "on") // Config + }) + + t.Run("shared_preload_libraries", func(t *testing.T) { + t.Run("NumericIncluded", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + require.UnmarshalInto(t, &cluster.Spec.Config, `{ + parameters: { + shared_preload_libraries: 123, + }, + }`) + + result := reconciler.generatePostgresParameters(ctx, cluster, false) + assert.Assert(t, cmp.Contains(result.Value("shared_preload_libraries"), "123")) + }) + + t.Run("Precedence", func(t *testing.T) { + cluster := v1beta1.NewPostgresCluster() + 
require.UnmarshalInto(t, &cluster.Spec.Config, `{ + parameters: { + shared_preload_libraries: given, + }, + }`) + + result := reconciler.generatePostgresParameters(ctx, cluster, false) + assert.Equal(t, result.Value("shared_preload_libraries"), "pgaudit,given", + "expected mandatory ahead of specified") + + require.UnmarshalInto(t, &cluster.Spec.Config, `{ + parameters: { + shared_preload_libraries: 'given, citus,other' + }, + }`) + + result = reconciler.generatePostgresParameters(ctx, cluster, false) + assert.Equal(t, result.Value("shared_preload_libraries"), "citus,pgaudit,given, citus,other", + "expected citus in front") + }) + }) +} + func TestGeneratePostgresUserSecret(t *testing.T) { _, tClient := setupKubernetes(t) require.ParallelCapacity(t, 0) diff --git a/internal/patroni/config.go b/internal/patroni/config.go index 52cf8e5e9e..2174607c63 100644 --- a/internal/patroni/config.go +++ b/internal/patroni/config.go @@ -10,7 +10,6 @@ import ( "strings" corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/config" @@ -40,7 +39,7 @@ const ( // clusterYAML returns Patroni settings that apply to the entire cluster. func clusterYAML( cluster *v1beta1.PostgresCluster, - pgHBAs postgres.HBAs, pgParameters postgres.Parameters, patroniLogStorageLimit int64, + pgHBAs postgres.HBAs, parameters *postgres.ParameterSet, patroniLogStorageLimit int64, ) (string, error) { root := map[string]any{ // The cluster identifier. This value cannot change during the cluster's @@ -193,7 +192,7 @@ func clusterYAML( // facilitate it. When Patroni is already bootstrapped, this field is ignored. root["bootstrap"] = map[string]any{ - "dcs": DynamicConfiguration(&cluster.Spec, pgHBAs, pgParameters), + "dcs": DynamicConfiguration(&cluster.Spec, pgHBAs, parameters), // Missing here is "users" which runs *after* "post_bootstrap". It is // not possible to use roles created by the former in the latter. 
@@ -209,7 +208,7 @@ func clusterYAML( // and returns a value that can be marshaled to JSON. func DynamicConfiguration( spec *v1beta1.PostgresClusterSpec, - pgHBAs postgres.HBAs, pgParameters postgres.Parameters, + pgHBAs postgres.HBAs, parameters *postgres.ParameterSet, ) map[string]any { // Copy the entire configuration before making any changes. root := make(map[string]any) @@ -242,55 +241,9 @@ func DynamicConfiguration( } root["postgresql"] = postgresql - // Copy the "postgresql.parameters" section over any defaults. - parameters := make(map[string]any) - if pgParameters.Default != nil { - for k, v := range pgParameters.Default.AsMap() { - parameters[k] = v - } - } - if section, ok := postgresql["parameters"].(map[string]any); ok { - for k, v := range section { - parameters[k] = v - } - } - // Copy spec.config.parameters over spec.patroni...parameters. - if spec.Config != nil { - for k, v := range spec.Config.Parameters { - parameters[k] = v - } - } - // Override all of the above with mandatory parameters. - if pgParameters.Mandatory != nil { - for k, v := range pgParameters.Mandatory.AsMap() { - - // This parameter is a comma-separated list. Rather than overwrite the - // user-defined value, we want to combine it with the mandatory one. - // Some libraries belong at specific positions in the list, so figure - // that out as well. - if k == "shared_preload_libraries" { - // Load mandatory libraries ahead of user-defined libraries. - switch s := parameters[k].(type) { - case string: - if len(s) > 0 { - v = v + "," + s - } - case intstr.IntOrString: - if len(s.StrVal) > 0 { - v = v + "," + s.StrVal - } - } - // Load "citus" ahead of any other libraries. 
- // - https://github.com/citusdata/citus/blob/v12.0.0/src/backend/distributed/shared_library_init.c#L417-L419 - if strings.Contains(v, "citus") { - v = "citus," + v - } - } - - parameters[k] = v - } + if m := parameters.AsMap(); m != nil { + postgresql["parameters"] = m } - postgresql["parameters"] = parameters // Copy the "postgresql.pg_hba" section after any mandatory values. hba := make([]string, 0, len(pgHBAs.Mandatory)) @@ -350,7 +303,7 @@ func DynamicConfiguration( // Populate the standby leader by shipping logs through pgBackRest. // This also overrides the "restore_command" used by standby replicas. // - https://www.postgresql.org/docs/current/warm-standby.html - standby["restore_command"] = pgParameters.Mandatory.Value("restore_command") + standby["restore_command"] = parameters.Value("restore_command") } standby["create_replica_methods"] = methods diff --git a/internal/patroni/config_test.go b/internal/patroni/config_test.go index d69edf8da1..d5ce0eb81d 100644 --- a/internal/patroni/config_test.go +++ b/internal/patroni/config_test.go @@ -15,7 +15,6 @@ import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/util/intstr" "sigs.k8s.io/yaml" "github.com/crunchydata/postgres-operator/internal/postgres" @@ -33,7 +32,7 @@ func TestClusterYAML(t *testing.T) { cluster.Namespace = "some-namespace" cluster.Name = "cluster-name" - data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.Parameters{}, 0) + data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.NewParameterSet(), 0) assert.NilError(t, err) assert.Equal(t, data, strings.TrimSpace(` # Generated by postgres-operator. DO NOT EDIT. 
@@ -92,7 +91,7 @@ watchdog: cluster.Name = "cluster-name" cluster.Spec.PostgresVersion = 14 - data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.Parameters{}, 0) + data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.NewParameterSet(), 0) assert.NilError(t, err) assert.Equal(t, data, strings.TrimSpace(` # Generated by postgres-operator. DO NOT EDIT. @@ -160,7 +159,7 @@ watchdog: Level: &logLevel, } - data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.Parameters{}, 1000) + data, err := clusterYAML(cluster, postgres.HBAs{}, postgres.NewParameterSet(), 1000) assert.NilError(t, err) assert.Equal(t, data, strings.TrimSpace(` # Generated by postgres-operator. DO NOT EDIT. @@ -235,7 +234,7 @@ func TestDynamicConfiguration(t *testing.T) { name string spec string hbas postgres.HBAs - params postgres.Parameters + params *postgres.ParameterSet expected map[string]any }{ { @@ -244,7 +243,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -265,7 +263,6 @@ func TestDynamicConfiguration(t *testing.T) { "ttl": int32(30), "retry_timeout": int64(5), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -288,7 +285,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(8), "ttl": int32(99), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -308,7 +304,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -331,7 +326,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - 
"parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": "input", @@ -339,111 +333,30 @@ func TestDynamicConfiguration(t *testing.T) { }, }, { - name: "postgresql.parameters: wrong-type is ignored", - spec: `{ - patroni: { - dynamicConfiguration: { - postgresql: { - parameters: true, - }, - }, - }, - }`, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{}, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, - { - name: "postgresql.parameters: input passes through", + name: "Postgres parameters pass through", spec: `{ patroni: { dynamicConfiguration: { postgresql: { parameters: { - something: str, - another: 5, + calculated: elsewhere, }, }, }, }, }`, + params: parameters(map[string]string{ + "something": "str", + "another": "5", + "unrelated": "default", + }), expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{ + "parameters": map[string]string{ "something": "str", - "another": int64(5), - }, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, - { - name: "config.parameters takes precedence", - spec: `{ - config: { - parameters: { - something: this, - }, - }, - patroni: { - dynamicConfiguration: { - postgresql: { - parameters: { - something: str, - another: 5, - }, - }, - }, - }, - }`, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{ - "something": intstr.FromString("this"), - "another": int64(5), - }, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, - { - name: "config.parameters: input overrides default", - spec: `{ - config: { - parameters: { - something: str, - another: 5, - }, - }, - }`, - params: postgres.Parameters{ - Default: parameters(map[string]string{ - "something": 
"overridden", - "unrelated": "default", - }), - }, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{ - "something": intstr.FromString("str"), - "another": intstr.FromInt(5), + "another": "5", "unrelated": "default", }, "pg_hba": []string{}, @@ -452,118 +365,6 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }, - { - name: "config.parameters: mandatory overrides input", - spec: `{ - config: { - parameters: { - something: str, - another: 5, - }, - }, - }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "something": "overrides", - "unrelated": "setting", - }), - }, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{ - "something": "overrides", - "another": intstr.FromInt(5), - "unrelated": "setting", - }, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, - { - name: "config.parameters: mandatory shared_preload_libraries", - spec: `{ - config: { - parameters: { - shared_preload_libraries: given, - }, - }, - }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "shared_preload_libraries": "mandatory", - }), - }, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{ - "shared_preload_libraries": "mandatory,given", - }, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, - { - name: "config.parameters: mandatory shared_preload_libraries wrong-type is ignored", - spec: `{ - config: { - parameters: { - shared_preload_libraries: 1, - }, - }, - }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "shared_preload_libraries": "mandatory", - }), - }, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": 
map[string]any{ - "shared_preload_libraries": "mandatory", - }, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, - { - name: "config.parameters: shared_preload_libraries order", - spec: `{ - config: { - parameters: { - shared_preload_libraries: "given, citus, more", - }, - }, - }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "shared_preload_libraries": "mandatory", - }), - }, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "parameters": map[string]any{ - "shared_preload_libraries": "citus,mandatory,given, citus, more", - }, - "pg_hba": []string{}, - "use_pg_rewind": true, - "use_slots": false, - }, - }, - }, { name: "postgresql.pg_hba: wrong-type is ignored", spec: `{ @@ -579,7 +380,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -606,7 +406,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{ "local all all peer", }, @@ -635,7 +434,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{ "custom", }, @@ -664,7 +462,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{ "local all all peer", "custom", @@ -694,7 +491,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{ "local all all peer", "custom", @@ -719,7 +515,6 @@ func TestDynamicConfiguration(t *testing.T) { "loop_wait": 
int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{}, "pg_hba": []string{}, "use_pg_rewind": true, "use_slots": false, @@ -745,16 +540,14 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "restore_command": "mandatory", - }), - }, + params: parameters(map[string]string{ + "restore_command": "mandatory", + }), expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{ + "parameters": map[string]string{ "restore_command": "mandatory", }, "pg_hba": []string{}, @@ -787,16 +580,14 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "restore_command": "mandatory", - }), - }, + params: parameters(map[string]string{ + "restore_command": "mandatory", + }), expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{ + "parameters": map[string]string{ "restore_command": "mandatory", }, "pg_hba": []string{}, @@ -831,16 +622,14 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }`, - params: postgres.Parameters{ - Mandatory: parameters(map[string]string{ - "restore_command": "mandatory", - }), - }, + params: parameters(map[string]string{ + "restore_command": "mandatory", + }), expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ - "parameters": map[string]any{ + "parameters": map[string]string{ "restore_command": "mandatory", }, "pg_hba": []string{}, @@ -865,40 +654,16 @@ func TestDynamicConfiguration(t *testing.T) { }, }, }`, + params: parameters(map[string]string{ + "encryption_key_command": "echo one", + }), expected: map[string]any{ "loop_wait": int32(10), "ttl": int32(30), "postgresql": map[string]any{ "bin_name": map[string]any{"pg_rewind": string("/tmp/pg_rewind_tde.sh")}, - 
"parameters": map[string]any{ - "encryption_key_command": intstr.FromString("echo one"), - }, - "pg_hba": []string{}, - "use_pg_rewind": bool(true), - "use_slots": bool(false), - }, - }, - }, - { - name: "postgresql.parameters: tde enabled", - spec: `{ - patroni: { - dynamicConfiguration: { - postgresql: { - parameters: { - encryption_key_command: echo test, - }, - }, - }, - }, - }`, - expected: map[string]any{ - "loop_wait": int32(10), - "ttl": int32(30), - "postgresql": map[string]any{ - "bin_name": map[string]any{"pg_rewind": string("/tmp/pg_rewind_tde.sh")}, - "parameters": map[string]any{ - "encryption_key_command": "echo test", + "parameters": map[string]string{ + "encryption_key_command": "echo one", }, "pg_hba": []string{}, "use_pg_rewind": bool(true), diff --git a/internal/patroni/postgres.go b/internal/patroni/postgres.go new file mode 100644 index 0000000000..cb686312fa --- /dev/null +++ b/internal/patroni/postgres.go @@ -0,0 +1,56 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package patroni + +import ( + "encoding/json" + "fmt" + + "github.com/crunchydata/postgres-operator/internal/postgres" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +// PostgresParameters returns the Postgres parameters in spec, if any. +func PostgresParameters(spec *v1beta1.PatroniSpec) *postgres.ParameterSet { + result := postgres.NewParameterSet() + + if spec != nil { + // DynamicConfiguration lacks an OpenAPI schema, so it may contain any type + // at any depth. Navigate the object and convert parameter values to string. 
+ // + // Patroni accepts booleans, integers, and strings but also parses + // string values into the types it expects: + // https://github.com/patroni/patroni/blob/v4.0.0/patroni/postgresql/validator.py + // + // Patroni passes JSON arrays and objects through Python str() which looks + // similar to YAML in simple cases: + // https://github.com/patroni/patroni/blob/v4.0.0/patroni/postgresql/config.py#L254-L259 + // + // >>> str(list((1, 2.3, True, "asdf"))) + // "[1, 2.3, True, 'asdf']" + // + // >>> str(dict(a = 1, b = True)) + // "{'a': 1, 'b': True}" + // + if root := spec.DynamicConfiguration; root != nil { + if postgresql, ok := root["postgresql"].(map[string]any); ok { + if section, ok := postgresql["parameters"].(map[string]any); ok { + for k, v := range section { + switch v.(type) { + case []any, map[string]any: + if b, err := json.Marshal(v); err == nil { + result.Add(k, string(b)) + } + default: + result.Add(k, fmt.Sprint(v)) + } + } + } + } + } + } + + return result +} diff --git a/internal/patroni/postgres_test.go b/internal/patroni/postgres_test.go new file mode 100644 index 0000000000..16fdc30fdf --- /dev/null +++ b/internal/patroni/postgres_test.go @@ -0,0 +1,112 @@ +// Copyright 2021 - 2025 Crunchy Data Solutions, Inc. 
+// +// SPDX-License-Identifier: Apache-2.0 + +package patroni + +import ( + "testing" + + "gotest.tools/v3/assert" + + "github.com/crunchydata/postgres-operator/internal/testing/require" + "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" +) + +func TestPostgresParameters(t *testing.T) { + t.Run("Zero", func(t *testing.T) { + result := PostgresParameters(nil) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), map[string]string{}) + }) + + t.Run("NoDynamicConfig", func(t *testing.T) { + spec := new(v1beta1.PatroniSpec) + result := PostgresParameters(spec) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), map[string]string{}) + }) + + t.Run("NoPostgreSQL", func(t *testing.T) { + spec := new(v1beta1.PatroniSpec) + require.UnmarshalInto(t, spec, `{ + dynamicConfiguration: {}, + }`) + result := PostgresParameters(spec) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), map[string]string{}) + }) + + t.Run("WrongPostgreSQLType", func(t *testing.T) { + spec := new(v1beta1.PatroniSpec) + require.UnmarshalInto(t, spec, `{ + dynamicConfiguration: { + postgresql: asdf, + }, + }`) + result := PostgresParameters(spec) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), map[string]string{}) + }) + + t.Run("NoParameters", func(t *testing.T) { + spec := new(v1beta1.PatroniSpec) + require.UnmarshalInto(t, spec, `{ + dynamicConfiguration: { + postgresql: { + use_pg_rewind: true, + }, + }, + }`) + result := PostgresParameters(spec) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), map[string]string{}) + }) + + t.Run("WrongParametersType", func(t *testing.T) { + spec := new(v1beta1.PatroniSpec) + require.UnmarshalInto(t, spec, `{ + dynamicConfiguration: { + postgresql: { + parameters: [1,2], + }, + }, + }`) + result := PostgresParameters(spec) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), 
map[string]string{}) + }) + + t.Run("Parameters", func(t *testing.T) { + spec := new(v1beta1.PatroniSpec) + require.UnmarshalInto(t, spec, `{ + dynamicConfiguration: { + postgresql: { + parameters: { + log_statement_sample_rate: 0.98, + max_connections: 1000, + wal_log_hints: true, + wal_level: replica, + strange.though: [ 1, 2.3, yes ], + }, + }, + }, + }`) + result := PostgresParameters(spec) + + assert.Assert(t, result != nil) + assert.DeepEqual(t, result.AsMap(), map[string]string{ + "log_statement_sample_rate": "0.98", + "max_connections": "1000", + "wal_log_hints": "true", + "wal_level": "replica", + "strange.though": "[1,2.3,true]", + }) + }) +} diff --git a/internal/patroni/reconcile.go b/internal/patroni/reconcile.go index 19c1131d7d..394a33d6d5 100644 --- a/internal/patroni/reconcile.go +++ b/internal/patroni/reconcile.go @@ -30,7 +30,7 @@ func ClusterBootstrapped(postgresCluster *v1beta1.PostgresCluster) bool { func ClusterConfigMap(ctx context.Context, inCluster *v1beta1.PostgresCluster, inHBAs postgres.HBAs, - inParameters postgres.Parameters, + inParameters *postgres.ParameterSet, outClusterConfigMap *corev1.ConfigMap, patroniLogStorageLimit int64, ) error { diff --git a/internal/patroni/reconcile_test.go b/internal/patroni/reconcile_test.go index 61916db258..9a82dfde2d 100644 --- a/internal/patroni/reconcile_test.go +++ b/internal/patroni/reconcile_test.go @@ -25,7 +25,7 @@ func TestClusterConfigMap(t *testing.T) { cluster := new(v1beta1.PostgresCluster) pgHBAs := postgres.HBAs{} - pgParameters := postgres.Parameters{} + pgParameters := postgres.NewParameterSet() cluster.Default() config := new(corev1.ConfigMap) diff --git a/internal/postgres/parameters.go b/internal/postgres/parameters.go index 58b86131f8..469eef0bfb 100644 --- a/internal/postgres/parameters.go +++ b/internal/postgres/parameters.go @@ -6,6 +6,7 @@ package postgres import ( "fmt" + "maps" "slices" "strings" ) @@ -68,17 +69,21 @@ func NewParameterSet() *ParameterSet { // AsMap returns 
a copy of ps as a map. func (ps *ParameterSet) AsMap() map[string]string { - out := make(map[string]string, len(ps.values)) - for name, value := range ps.values { - out[name] = value + if ps == nil { + return nil } - return out + + return maps.Clone(ps.values) } // DeepCopy returns a copy of ps. -func (ps *ParameterSet) DeepCopy() (out *ParameterSet) { +func (ps *ParameterSet) DeepCopy() *ParameterSet { + if ps == nil { + return nil + } + return &ParameterSet{ - values: ps.AsMap(), + values: maps.Clone(ps.values), } } diff --git a/internal/postgres/parameters_test.go b/internal/postgres/parameters_test.go index dc08d7004a..5126899d90 100644 --- a/internal/postgres/parameters_test.go +++ b/internal/postgres/parameters_test.go @@ -31,6 +31,16 @@ func TestNewParameters(t *testing.T) { } func TestParameterSet(t *testing.T) { + t.Run("NilAsMap", func(t *testing.T) { + m := (*ParameterSet)(nil).AsMap() + assert.Assert(t, m == nil) + }) + + t.Run("NilDeepCopy", func(t *testing.T) { + ps := (*ParameterSet)(nil).DeepCopy() + assert.Assert(t, ps == nil) + }) + ps := NewParameterSet() ps.Add("x", "y") diff --git a/internal/testing/cmp/cmp.go b/internal/testing/cmp/cmp.go index 3ddaad73f5..d7b5764e41 100644 --- a/internal/testing/cmp/cmp.go +++ b/internal/testing/cmp/cmp.go @@ -56,6 +56,15 @@ func Len[Slice ~[]E, E any](actual Slice, expected int) Comparison { return gotest.Len(actual, expected) } +// LenMap succeeds if actual has the expected length. +func LenMap[Map ~map[K]V, K comparable, V any](actual Map, expected int) Comparison { + // There doesn't seem to be a way to express "map or slice" in type constraints + // that [Go 1.22] compiler can nicely infer. Ideally, this function goes + // away when a better constraint can be expressed on [Len]. + + return gotest.Len(actual, expected) +} + // MarshalContains converts actual to YAML and succeeds if expected is in the result. 
func MarshalContains(actual any, expected string) Comparison { b, err := yaml.Marshal(actual) diff --git a/internal/testing/require/encoding_test.go b/internal/testing/require/encoding_test.go index b7c287c1c2..e4f53611eb 100644 --- a/internal/testing/require/encoding_test.go +++ b/internal/testing/require/encoding_test.go @@ -29,6 +29,7 @@ func TestUnmarshalInto(t *testing.T) { {input: `asdf`, expected: "asdf"}, {input: `"asdf"`, expected: "asdf"}, {input: `[1, 2.3, true]`, expected: []any{int64(1), float64(2.3), true}}, + {input: `{a: b, c, d}`, expected: map[string]any{"a": "b", "c": nil, "d": nil}}, } { sink := reflect.Zero(reflect.TypeOf(tt.expected)).Interface() require.UnmarshalInto(t, &sink, tt.input)