diff --git a/.golangci.yaml b/.golangci.yaml index 59feb443de..d46231c417 100644 --- a/.golangci.yaml +++ b/.golangci.yaml @@ -40,6 +40,12 @@ linters-settings: - pkg: github.com/crunchydata/postgres-operator/internal/testing/* desc: The "internal/testing" packages should be used only in tests. + tests: + files: ['$test'] + deny: + - pkg: github.com/pkg/errors + desc: Use the "errors" package unless you are interacting with stack traces. + errchkjson: check-error-free-encoding: true diff --git a/Makefile b/Makefile index 37aca1a37e..10e6b1c038 100644 --- a/Makefile +++ b/Makefile @@ -9,9 +9,6 @@ PGMONITOR_DIR ?= hack/tools/pgmonitor PGMONITOR_VERSION ?= v5.1.1 QUERIES_CONFIG_DIR ?= hack/tools/queries -EXTERNAL_SNAPSHOTTER_DIR ?= hack/tools/external-snapshotter -EXTERNAL_SNAPSHOTTER_VERSION ?= v8.0.1 - # Buildah's "build" used to be "bud". Use the alias to be compatible for a while. BUILDAH_BUILD ?= buildah bud @@ -55,12 +52,6 @@ get-pgmonitor: cp -r '$(PGMONITOR_DIR)/postgres_exporter/common/.' '${QUERIES_CONFIG_DIR}' cp '$(PGMONITOR_DIR)/postgres_exporter/linux/queries_backrest.yml' '${QUERIES_CONFIG_DIR}' -.PHONY: get-external-snapshotter -get-external-snapshotter: - git -C '$(dir $(EXTERNAL_SNAPSHOTTER_DIR))' clone https://github.com/kubernetes-csi/external-snapshotter.git || git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' fetch origin - @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' checkout '$(EXTERNAL_SNAPSHOTTER_VERSION)' - @git -C '$(EXTERNAL_SNAPSHOTTER_DIR)' config pull.ff only - .PHONY: clean clean: ## Clean resources clean: clean-deprecated @@ -203,7 +194,7 @@ check: get-pgmonitor check-envtest: ## Run check using envtest and a mock kube api check-envtest: ENVTEST_USE = $(ENVTEST) --bin-dir=$(CURDIR)/hack/tools/envtest use $(ENVTEST_K8S_VERSION) check-envtest: SHELL = bash -check-envtest: get-pgmonitor tools/setup-envtest get-external-snapshotter +check-envtest: get-pgmonitor tools/setup-envtest @$(ENVTEST_USE) --print=overview && echo source <($(ENVTEST_USE) --print=env) && PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ $(GO_TEST) -count=1 -cover ./... 
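Aside: the depguard rule added to .golangci.yaml above denies "github.com/pkg/errors" in test files (the '$test' matcher). A minimal sketch of the standard-library pattern it steers tests toward (the test name and error message here are hypothetical):

    package example_test

    import (
        "errors" // standard library; permitted by the new depguard rule
        "fmt"
        "testing"
    )

    // Wrapping with %w and checking with errors.Is covers the common test
    // assertions without the stack traces that github.com/pkg/errors adds.
    func TestErrorWrapping(t *testing.T) {
        base := errors.New("boom")
        wrapped := fmt.Errorf("reconcile failed: %w", base)

        if !errors.Is(wrapped, base) {
            t.Fatalf("expected wrapped error to match base, got %v", wrapped)
        }
    }
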
@@ -214,7 +205,7 @@ check-envtest: get-pgmonitor tools/setup-envtest get-external-snapshotter # make check-envtest-existing PGO_TEST_TIMEOUT_SCALE=1.2 .PHONY: check-envtest-existing check-envtest-existing: ## Run check using envtest and an existing kube api -check-envtest-existing: get-pgmonitor get-external-snapshotter +check-envtest-existing: get-pgmonitor check-envtest-existing: createnamespaces kubectl apply --server-side -k ./config/dev USE_EXISTING_CLUSTER=true PGO_NAMESPACE="postgres-operator" QUERIES_CONFIG_DIR="$(CURDIR)/${QUERIES_CONFIG_DIR}" \ diff --git a/go.mod b/go.mod index d268d66018..71f55afa1f 100644 --- a/go.mod +++ b/go.mod @@ -22,6 +22,7 @@ require ( go.opentelemetry.io/otel/sdk v1.27.0 go.opentelemetry.io/otel/trace v1.27.0 golang.org/x/crypto v0.27.0 + golang.org/x/tools v0.22.0 gotest.tools/v3 v3.1.0 k8s.io/api v0.30.2 k8s.io/apimachinery v0.30.2 @@ -72,13 +73,14 @@ require ( go.opentelemetry.io/otel/metric v1.27.0 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 // indirect + golang.org/x/mod v0.18.0 // indirect golang.org/x/net v0.29.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sync v0.8.0 // indirect golang.org/x/sys v0.25.0 // indirect golang.org/x/term v0.24.0 // indirect golang.org/x/text v0.18.0 // indirect golang.org/x/time v0.5.0 // indirect - golang.org/x/tools v0.22.0 // indirect gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20240610135401-a8a62080eff3 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240903143218-8af14fe29dc1 // indirect diff --git a/go.sum b/go.sum index aed2056f6f..7bfdd47f96 100644 --- a/go.sum +++ b/go.sum @@ -161,6 +161,8 @@ golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8 h1:LoYXNGAShUG3m/ehNk4iFctuh golang.org/x/exp v0.0.0-20240604190554-fc45aab8b7f8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -172,6 +174,8 @@ golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= +golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= diff --git a/internal/controller/pgupgrade/world_test.go 
b/internal/controller/pgupgrade/world_test.go index 4aa24f714d..a6801c12eb 100644 --- a/internal/controller/pgupgrade/world_test.go +++ b/internal/controller/pgupgrade/world_test.go @@ -13,8 +13,8 @@ import ( corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/runtime/schema" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) @@ -34,7 +34,7 @@ func TestPopulateCluster(t *testing.T) { t.Run("NotFound", func(t *testing.T) { cluster := v1beta1.NewPostgresCluster() - expected := apierrors.NewNotFound(schema.GroupResource{}, "name") + expected := apierrors.NewNotFound(runtime.GR{}, "name") world := NewWorld() err := world.populateCluster(cluster, expected) diff --git a/internal/controller/postgrescluster/cluster_test.go b/internal/controller/postgrescluster/cluster_test.go index be9e371a56..491add9f34 100644 --- a/internal/controller/postgrescluster/cluster_test.go +++ b/internal/controller/postgrescluster/cluster_test.go @@ -8,7 +8,6 @@ import ( "context" "testing" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" @@ -17,12 +16,11 @@ import ( rbacv1 "k8s.io/api/rbac/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/client-go/tools/record" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/testing/cmp" @@ -30,7 +28,7 @@ import ( "github.com/crunchydata/postgres-operator/pkg/apis/postgres-operator.crunchydata.com/v1beta1" ) -var gvks = []schema.GroupVersionKind{{ +var gvks = []runtime.GVK{{ Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Kind: "ConfigMapList", @@ -91,7 +89,7 @@ func TestCustomLabels(t *testing.T) { ns := setupNamespace(t, cc) reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( @@ -107,28 +105,25 @@ func TestCustomLabels(t *testing.T) { assert.Assert(t, result.Requeue == false) } - getUnstructuredLabels := func(cluster v1beta1.PostgresCluster, u unstructured.Unstructured) (map[string]map[string]string, error) { - var err error + getUnstructuredLabels := func(t *testing.T, cluster *v1beta1.PostgresCluster, u *unstructured.Unstructured) map[string]map[string]string { + t.Helper() labels := map[string]map[string]string{} - if metav1.IsControlledBy(&u, &cluster) { + if metav1.IsControlledBy(u, cluster) { switch u.GetKind() { case "StatefulSet": - var resource appsv1.StatefulSet - err = runtime.DefaultUnstructuredConverter. 
- FromUnstructured(u.UnstructuredContent(), &resource) + resource, err := runtime.FromUnstructuredObject[appsv1.StatefulSet](u) + assert.NilError(t, err) labels["resource"] = resource.GetLabels() labels["podTemplate"] = resource.Spec.Template.GetLabels() case "Deployment": - var resource appsv1.Deployment - err = runtime.DefaultUnstructuredConverter. - FromUnstructured(u.UnstructuredContent(), &resource) + resource, err := runtime.FromUnstructuredObject[appsv1.Deployment](u) + assert.NilError(t, err) labels["resource"] = resource.GetLabels() labels["podTemplate"] = resource.Spec.Template.GetLabels() case "CronJob": - var resource batchv1.CronJob - err = runtime.DefaultUnstructuredConverter. - FromUnstructured(u.UnstructuredContent(), &resource) + resource, err := runtime.FromUnstructuredObject[batchv1.CronJob](u) + assert.NilError(t, err) labels["resource"] = resource.GetLabels() labels["jobTemplate"] = resource.Spec.JobTemplate.GetLabels() labels["jobPodTemplate"] = resource.Spec.JobTemplate.Spec.Template.GetLabels() @@ -136,7 +131,7 @@ func TestCustomLabels(t *testing.T) { labels["resource"] = u.GetLabels() } } - return labels, err + return labels } t.Run("Cluster", func(t *testing.T) { @@ -176,10 +171,8 @@ func TestCustomLabels(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - labels, err := getUnstructuredLabels(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + labels := getUnstructuredLabels(t, cluster, &u) for resourceType, resourceLabels := range labels { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceLabels["my.cluster.label"], "daisy") @@ -226,11 +219,8 @@ func TestCustomLabels(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - - labels, err := getUnstructuredLabels(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + labels := getUnstructuredLabels(t, cluster, &u) for resourceType, resourceLabels := range labels { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceLabels["my.instance.label"], set.Metadata.Labels["my.instance.label"]) @@ -276,11 +266,8 @@ func TestCustomLabels(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - - labels, err := getUnstructuredLabels(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + labels := getUnstructuredLabels(t, cluster, &u) for resourceType, resourceLabels := range labels { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceLabels["my.pgbackrest.label"], "lucy") @@ -314,11 +301,8 @@ func TestCustomLabels(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - - labels, err := getUnstructuredLabels(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + labels := getUnstructuredLabels(t, cluster, &u) for resourceType, resourceLabels := range labels { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceLabels["my.pgbouncer.label"], "lucy") @@ -344,7 +328,7 @@ func TestCustomAnnotations(t *testing.T) { ns := setupNamespace(t, cc) 
reconcileTestCluster := func(cluster *v1beta1.PostgresCluster) { - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. assert.Check(t, client.IgnoreNotFound( @@ -360,28 +344,25 @@ func TestCustomAnnotations(t *testing.T) { assert.Assert(t, result.Requeue == false) } - getUnstructuredAnnotations := func(cluster v1beta1.PostgresCluster, u unstructured.Unstructured) (map[string]map[string]string, error) { - var err error + getUnstructuredAnnotations := func(t *testing.T, cluster *v1beta1.PostgresCluster, u *unstructured.Unstructured) map[string]map[string]string { + t.Helper() annotations := map[string]map[string]string{} - if metav1.IsControlledBy(&u, &cluster) { + if metav1.IsControlledBy(u, cluster) { switch u.GetKind() { case "StatefulSet": - var resource appsv1.StatefulSet - err = runtime.DefaultUnstructuredConverter. - FromUnstructured(u.UnstructuredContent(), &resource) + resource, err := runtime.FromUnstructuredObject[appsv1.StatefulSet](u) + assert.NilError(t, err) annotations["resource"] = resource.GetAnnotations() annotations["podTemplate"] = resource.Spec.Template.GetAnnotations() case "Deployment": - var resource appsv1.Deployment - err = runtime.DefaultUnstructuredConverter. - FromUnstructured(u.UnstructuredContent(), &resource) + resource, err := runtime.FromUnstructuredObject[appsv1.Deployment](u) + assert.NilError(t, err) annotations["resource"] = resource.GetAnnotations() annotations["podTemplate"] = resource.Spec.Template.GetAnnotations() case "CronJob": - var resource batchv1.CronJob - err = runtime.DefaultUnstructuredConverter. - FromUnstructured(u.UnstructuredContent(), &resource) + resource, err := runtime.FromUnstructuredObject[batchv1.CronJob](u) + assert.NilError(t, err) annotations["resource"] = resource.GetAnnotations() annotations["jobTemplate"] = resource.Spec.JobTemplate.GetAnnotations() annotations["jobPodTemplate"] = resource.Spec.JobTemplate.Spec.Template.GetAnnotations() @@ -389,7 +370,7 @@ func TestCustomAnnotations(t *testing.T) { annotations["resource"] = u.GetAnnotations() } } - return annotations, err + return annotations } t.Run("Cluster", func(t *testing.T) { @@ -430,10 +411,8 @@ func TestCustomAnnotations(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - annotations, err := getUnstructuredAnnotations(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + annotations := getUnstructuredAnnotations(t, cluster, &u) for resourceType, resourceAnnotations := range annotations { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceAnnotations["my.cluster.annotation"], "daisy") @@ -480,11 +459,8 @@ func TestCustomAnnotations(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - - annotations, err := getUnstructuredAnnotations(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + annotations := getUnstructuredAnnotations(t, cluster, &u) for resourceType, resourceAnnotations := range annotations { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceAnnotations["my.instance.annotation"], set.Metadata.Annotations["my.instance.annotation"]) @@ -530,11 
+506,8 @@ func TestCustomAnnotations(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - - annotations, err := getUnstructuredAnnotations(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + annotations := getUnstructuredAnnotations(t, cluster, &u) for resourceType, resourceAnnotations := range annotations { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceAnnotations["my.pgbackrest.annotation"], "lucy") @@ -568,11 +541,8 @@ func TestCustomAnnotations(t *testing.T) { client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selector})) - for i := range uList.Items { - u := uList.Items[i] - - annotations, err := getUnstructuredAnnotations(*cluster, u) - assert.NilError(t, err) + for _, u := range uList.Items { + annotations := getUnstructuredAnnotations(t, cluster, &u) for resourceType, resourceAnnotations := range annotations { t.Run(u.GetKind()+"/"+u.GetName()+"/"+resourceType, func(t *testing.T) { assert.Equal(t, resourceAnnotations["my.pgbouncer.annotation"], "lucy") diff --git a/internal/controller/postgrescluster/controller_test.go b/internal/controller/postgrescluster/controller_test.go index d6f3730623..b9e928ecce 100644 --- a/internal/controller/postgrescluster/controller_test.go +++ b/internal/controller/postgrescluster/controller_test.go @@ -13,7 +13,7 @@ import ( . "github.com/onsi/ginkgo/v2" . "github.com/onsi/gomega" . "github.com/onsi/gomega/gstruct" - "github.com/pkg/errors" + "github.com/pkg/errors" //nolint:depguard // This legacy test covers so much code, it logs the origin of unexpected errors. "go.opentelemetry.io/otel" "gotest.tools/v3/assert" diff --git a/internal/controller/postgrescluster/instance.go b/internal/controller/postgrescluster/instance.go index 66321cc738..8a0eb21ba3 100644 --- a/internal/controller/postgrescluster/instance.go +++ b/internal/controller/postgrescluster/instance.go @@ -21,7 +21,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" "sigs.k8s.io/controller-runtime/pkg/client" @@ -466,7 +465,7 @@ func (r *Reconciler) deleteInstances( // stop schedules pod for deletion by scaling its controller to zero. 
stop := func(pod *corev1.Pod) error { - instance := &unstructured.Unstructured{} + instance := &appsv1.StatefulSet{} instance.SetNamespace(cluster.Namespace) switch owner := metav1.GetControllerOfNoCopy(pod); { @@ -474,8 +473,6 @@ func (r *Reconciler) deleteInstances( return errors.Errorf("pod %q has no owner", client.ObjectKeyFromObject(pod)) case owner.Kind == "StatefulSet": - instance.SetAPIVersion(owner.APIVersion) - instance.SetKind(owner.Kind) instance.SetName(owner.Name) default: @@ -536,7 +533,7 @@ func (r *Reconciler) deleteInstance( cluster *v1beta1.PostgresCluster, instanceName string, ) error { - gvks := []schema.GroupVersionKind{{ + gvks := []runtime.GVK{{ Group: corev1.SchemeGroupVersion.Group, Version: corev1.SchemeGroupVersion.Version, Kind: "ConfigMapList", diff --git a/internal/controller/postgrescluster/instance_test.go b/internal/controller/postgrescluster/instance_test.go index f7f59f50a5..c851d2b17b 100644 --- a/internal/controller/postgrescluster/instance_test.go +++ b/internal/controller/postgrescluster/instance_test.go @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "os" "sort" @@ -15,7 +16,6 @@ import ( "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" - "github.com/pkg/errors" "go.opentelemetry.io/otel" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" @@ -25,7 +25,6 @@ import ( "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime/schema" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/sets" @@ -1347,7 +1346,7 @@ func TestDeleteInstance(t *testing.T) { cluster := testCluster() cluster.Namespace = setupNamespace(t, cc).Name - assert.NilError(t, errors.WithStack(reconciler.Client.Create(ctx, cluster))) + assert.NilError(t, reconciler.Client.Create(ctx, cluster)) t.Cleanup(func() { // Remove finalizers, if any, so the namespace can terminate. 
assert.Check(t, client.IgnoreNotFound( @@ -1377,7 +1376,7 @@ func TestDeleteInstance(t *testing.T) { // Use the instance name to delete the single instance assert.NilError(t, reconciler.deleteInstance(ctx, cluster, instanceName)) - gvks := []schema.GroupVersionKind{ + gvks := []runtime.GVK{ corev1.SchemeGroupVersion.WithKind("PersistentVolumeClaim"), corev1.SchemeGroupVersion.WithKind("ConfigMap"), corev1.SchemeGroupVersion.WithKind("Secret"), @@ -1397,9 +1396,9 @@ func TestDeleteInstance(t *testing.T) { err := wait.PollUntilContextTimeout(ctx, time.Second*3, Scale(time.Second*30), false, func(ctx context.Context) (bool, error) { uList := &unstructured.UnstructuredList{} uList.SetGroupVersionKind(gvk) - assert.NilError(t, errors.WithStack(reconciler.Client.List(ctx, uList, + assert.NilError(t, reconciler.Client.List(ctx, uList, client.InNamespace(cluster.Namespace), - client.MatchingLabelsSelector{Selector: selector}))) + client.MatchingLabelsSelector{Selector: selector})) if len(uList.Items) == 0 { return true, nil diff --git a/internal/controller/postgrescluster/patroni_test.go b/internal/controller/postgrescluster/patroni_test.go index 4f1bbba0bc..4a55ba9d78 100644 --- a/internal/controller/postgrescluster/patroni_test.go +++ b/internal/controller/postgrescluster/patroni_test.go @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "io" "os" @@ -14,7 +15,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" diff --git a/internal/controller/postgrescluster/pgadmin_test.go b/internal/controller/postgrescluster/pgadmin_test.go index 92ec6f42f1..5a818f06b4 100644 --- a/internal/controller/postgrescluster/pgadmin_test.go +++ b/internal/controller/postgrescluster/pgadmin_test.go @@ -6,11 +6,11 @@ package postgrescluster import ( "context" + "errors" "io" "strconv" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" diff --git a/internal/controller/postgrescluster/pgbackrest.go b/internal/controller/postgrescluster/pgbackrest.go index 836df047fc..a6cfe8bba9 100644 --- a/internal/controller/postgrescluster/pgbackrest.go +++ b/internal/controller/postgrescluster/pgbackrest.go @@ -23,15 +23,12 @@ import ( "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" utilerrors "k8s.io/apimachinery/pkg/util/errors" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/reconcile" "github.com/crunchydata/postgres-operator/internal/config" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/feature" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/logging" @@ -207,7 +204,7 @@ func (r *Reconciler) getPGBackRestResources(ctx context.Context, repoResources := &RepoResources{} - gvks := []schema.GroupVersionKind{{ + gvks := []runtime.GVK{{ Group: appsv1.SchemeGroupVersion.Group, Version: appsv1.SchemeGroupVersion.Version, Kind: "StatefulSetList", @@ -439,27 +436,24 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, switch kind { case "StatefulSetList": - var stsList appsv1.StatefulSetList - if err := 
runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &stsList); err != nil { + stsList, err := runtime.FromUnstructuredList[appsv1.StatefulSetList](uList) + if err != nil { return errors.WithStack(err) } for i := range stsList.Items { repoResources.hosts = append(repoResources.hosts, &stsList.Items[i]) } case "CronJobList": - var cronList batchv1.CronJobList - if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &cronList); err != nil { + cronList, err := runtime.FromUnstructuredList[batchv1.CronJobList](uList) + if err != nil { return errors.WithStack(err) } for i := range cronList.Items { repoResources.cronjobs = append(repoResources.cronjobs, &cronList.Items[i]) } case "JobList": - var jobList batchv1.JobList - if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &jobList); err != nil { + jobList, err := runtime.FromUnstructuredList[batchv1.JobList](uList) + if err != nil { return errors.WithStack(err) } // we care about replica create backup jobs and manual backup jobs @@ -477,9 +471,8 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, // Repository host now uses mTLS for encryption, authentication, and authorization. // Configmaps for SSHD are no longer managed here. case "PersistentVolumeClaimList": - var pvcList corev1.PersistentVolumeClaimList - if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &pvcList); err != nil { + pvcList, err := runtime.FromUnstructuredList[corev1.PersistentVolumeClaimList](uList) + if err != nil { return errors.WithStack(err) } for i := range pvcList.Items { @@ -491,27 +484,24 @@ func unstructuredToRepoResources(kind string, repoResources *RepoResources, // TODO(tjmoore4): Consider adding all pgBackRest secrets to RepoResources to // observe all pgBackRest secrets in one place. case "ServiceAccountList": - var saList corev1.ServiceAccountList - if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &saList); err != nil { + saList, err := runtime.FromUnstructuredList[corev1.ServiceAccountList](uList) + if err != nil { return errors.WithStack(err) } for i := range saList.Items { repoResources.sas = append(repoResources.sas, &saList.Items[i]) } case "RoleList": - var roleList rbacv1.RoleList - if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &roleList); err != nil { + roleList, err := runtime.FromUnstructuredList[rbacv1.RoleList](uList) + if err != nil { return errors.WithStack(err) } for i := range roleList.Items { repoResources.roles = append(repoResources.roles, &roleList.Items[i]) } case "RoleBindingList": - var rb rbacv1.RoleBindingList - if err := runtime.DefaultUnstructuredConverter. - FromUnstructured(uList.UnstructuredContent(), &rb); err != nil { + rb, err := runtime.FromUnstructuredList[rbacv1.RoleBindingList](uList) + if err != nil { return errors.WithStack(err) } for i := range rb.Items { @@ -532,9 +522,8 @@ func (r *Reconciler) setScheduledJobStatus(ctx context.Context, log := logging.FromContext(ctx) uList := &unstructured.UnstructuredList{Items: items} - var jobList batchv1.JobList - if err := runtime.DefaultUnstructuredConverter. 
- FromUnstructured(uList.UnstructuredContent(), &jobList); err != nil { + jobList, err := runtime.FromUnstructuredList[batchv1.JobList](uList) + if err != nil { // as this is only setting a status that is not otherwise used // by the Operator, simply log an error and return rather than // bubble this up to the other functions @@ -714,8 +703,7 @@ func (r *Reconciler) generateRepoHostIntent(ctx context.Context, postgresCluster addTMPEmptyDir(&repo.Spec.Template) // set ownership references - if err := controllerutil.SetControllerReference(postgresCluster, repo, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, repo); err != nil { return nil, err } @@ -760,8 +748,7 @@ func (r *Reconciler) generateRepoVolumeIntent(postgresCluster *v1beta1.PostgresC } // set ownership references - if err := controllerutil.SetControllerReference(postgresCluster, repoVol, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, repoVol); err != nil { return nil, err } @@ -1878,7 +1865,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, if sourceCluster.Spec.Backups.PGBackRest.Configuration[i].Secret != nil { secretProjection := sourceCluster.Spec.Backups.PGBackRest.Configuration[i].Secret secretCopy := &corev1.Secret{} - secretName := types.NamespacedName{ + secretName := client.ObjectKey{ Name: secretProjection.Name, Namespace: sourceCluster.Namespace, } @@ -1932,7 +1919,7 @@ func (r *Reconciler) copyConfigurationResources(ctx context.Context, cluster, if sourceCluster.Spec.Backups.PGBackRest.Configuration[i].ConfigMap != nil { configMapProjection := sourceCluster.Spec.Backups.PGBackRest.Configuration[i].ConfigMap configMapCopy := &corev1.ConfigMap{} - configMapName := types.NamespacedName{ + configMapName := client.ObjectKey{ Name: configMapProjection.Name, Namespace: sourceCluster.Namespace, } @@ -1993,8 +1980,7 @@ func (r *Reconciler) reconcilePGBackRestConfig(ctx context.Context, backrestConfig := pgbackrest.CreatePGBackRestConfigMapIntent(postgresCluster, repoHostName, configHash, serviceName, serviceNamespace, instanceNames) - if err := controllerutil.SetControllerReference(postgresCluster, backrestConfig, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, backrestConfig); err != nil { return err } if err := r.apply(ctx, backrestConfig); err != nil { @@ -2380,8 +2366,7 @@ func (r *Reconciler) reconcileManualBackup(ctx context.Context, // set gvk and ownership refs backupJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(postgresCluster, backupJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, backupJob); err != nil { return errors.WithStack(err) } @@ -2541,8 +2526,7 @@ func (r *Reconciler) reconcileReplicaCreateBackup(ctx context.Context, // set gvk and ownership refs backupJob.SetGroupVersionKind(batchv1.SchemeGroupVersion.WithKind("Job")) - if err := controllerutil.SetControllerReference(postgresCluster, backupJob, - r.Client.Scheme()); err != nil { + if err := r.setControllerReference(postgresCluster, backupJob); err != nil { return errors.WithStack(err) } diff --git a/internal/controller/postgrescluster/pgbackrest_test.go b/internal/controller/postgrescluster/pgbackrest_test.go index 8e34dabb5e..c078f37d8a 100644 --- a/internal/controller/postgrescluster/pgbackrest_test.go +++ b/internal/controller/postgrescluster/pgbackrest_test.go @@ -25,9 +25,7 @@ import 
( "k8s.io/apimachinery/pkg/api/meta" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" - "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" "k8s.io/apimachinery/pkg/labels" - "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/selection" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/rand" @@ -37,6 +35,7 @@ import ( "sigs.k8s.io/controller-runtime/pkg/manager" "sigs.k8s.io/controller-runtime/pkg/reconcile" + "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" "github.com/crunchydata/postgres-operator/internal/pgbackrest" @@ -3667,7 +3666,7 @@ func TestSetScheduledJobStatus(t *testing.T) { // create a PostgresCluster to test with postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) - testJob := &batchv1.Job{ + uList, err := runtime.ToUnstructuredList(&batchv1.JobList{Items: []batchv1.Job{{ TypeMeta: metav1.TypeMeta{ Kind: "Job", }, @@ -3680,18 +3679,8 @@ func TestSetScheduledJobStatus(t *testing.T) { Succeeded: 2, Failed: 3, }, - } - - // convert the runtime.Object to an unstructured object - unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(testJob) + }}}) assert.NilError(t, err) - unstructuredJob := &unstructured.Unstructured{ - Object: unstructuredObj, - } - - // add it to an unstructured list - uList := &unstructured.UnstructuredList{} - uList.Items = append(uList.Items, *unstructuredJob) // set the status r.setScheduledJobStatus(ctx, postgresCluster, uList.Items) @@ -3706,7 +3695,7 @@ func TestSetScheduledJobStatus(t *testing.T) { // create a PostgresCluster to test with postgresCluster := fakePostgresCluster(clusterName, ns.GetName(), clusterUID, true) - testJob := &batchv1.Job{ + uList, err := runtime.ToUnstructuredList(&batchv1.JobList{Items: []batchv1.Job{{ TypeMeta: metav1.TypeMeta{ Kind: "Job", }, @@ -3718,18 +3707,8 @@ func TestSetScheduledJobStatus(t *testing.T) { Succeeded: 2, Failed: 3, }, - } - - // convert the runtime.Object to an unstructured object - unstructuredObj, err := runtime.DefaultUnstructuredConverter.ToUnstructured(testJob) + }}}) assert.NilError(t, err) - unstructuredJob := &unstructured.Unstructured{ - Object: unstructuredObj, - } - - // add it to an unstructured list - uList := &unstructured.UnstructuredList{} - uList.Items = append(uList.Items, *unstructuredJob) // set the status r.setScheduledJobStatus(ctx, postgresCluster, uList.Items) diff --git a/internal/controller/postgrescluster/pgbouncer_test.go b/internal/controller/postgrescluster/pgbouncer_test.go index 9bbced5247..23c502d297 100644 --- a/internal/controller/postgrescluster/pgbouncer_test.go +++ b/internal/controller/postgrescluster/pgbouncer_test.go @@ -6,10 +6,10 @@ package postgrescluster import ( "context" + "errors" "strconv" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" diff --git a/internal/controller/postgrescluster/pki_test.go b/internal/controller/postgrescluster/pki_test.go index c2fe7af82a..74099b353f 100644 --- a/internal/controller/postgrescluster/pki_test.go +++ b/internal/controller/postgrescluster/pki_test.go @@ -12,7 +12,6 @@ import ( "strings" "testing" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" @@ -145,8 +144,7 @@ func TestReconcileCerts(t *testing.T) { 
emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret, creating a new root cert returnedRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -206,7 +204,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret newRootCert, err := r.reconcileRootCertificate(ctx, cluster1) @@ -331,8 +329,7 @@ func TestReconcileCerts(t *testing.T) { emptyRootSecret.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("Secret")) emptyRootSecret.Namespace, emptyRootSecret.Name = namespace, naming.RootCertSecret emptyRootSecret.Data = make(map[string][]byte) - err = errors.WithStack(r.apply(ctx, emptyRootSecret)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, emptyRootSecret)) // reconcile the root cert secret, creating a new root cert returnedRoot, err := r.reconcileRootCertificate(ctx, cluster1) @@ -392,7 +389,7 @@ func getCertFromSecret( // get the cert from the secret secretCRT, ok := secret.Data[dataKey] if !ok { - return nil, errors.New(fmt.Sprintf("could not retrieve %s", dataKey)) + return nil, fmt.Errorf("could not retrieve %s", dataKey) } // parse the cert from binary encoded data diff --git a/internal/controller/postgrescluster/postgres_test.go b/internal/controller/postgrescluster/postgres_test.go index 0780b0f577..901663b600 100644 --- a/internal/controller/postgrescluster/postgres_test.go +++ b/internal/controller/postgrescluster/postgres_test.go @@ -6,6 +6,7 @@ package postgrescluster import ( "context" + "errors" "fmt" "io" "testing" @@ -13,7 +14,6 @@ import ( "github.com/go-logr/logr/funcr" "github.com/google/go-cmp/cmp/cmpopts" volumesnapshotv1 "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" diff --git a/internal/controller/postgrescluster/snapshots_test.go b/internal/controller/postgrescluster/snapshots_test.go index 4c3d987ecd..98e2336494 100644 --- a/internal/controller/postgrescluster/snapshots_test.go +++ b/internal/controller/postgrescluster/snapshots_test.go @@ -9,7 +9,6 @@ import ( "testing" "time" - "github.com/pkg/errors" "gotest.tools/v3/assert" appsv1 "k8s.io/api/apps/v1" batchv1 "k8s.io/api/batch/v1" @@ -72,34 +71,29 @@ func TestReconcileVolumeSnapshots(t *testing.T) { volumeSnapshotClassName := "my-snapshotclass" snapshot, err := r.generateVolumeSnapshot(cluster, *pvc, volumeSnapshotClassName) assert.NilError(t, err) - err = errors.WithStack(r.apply(ctx, snapshot)) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot)) // Get all snapshots for this cluster and assert 1 exists selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), 
client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) // Reconcile snapshots - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Get all snapshots for this cluster and assert 0 exist - assert.NilError(t, err) snapshots = &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 0) }) @@ -131,8 +125,7 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert warning event was created and has expected attributes if assert.Check(t, len(recorder.Events) > 0) { @@ -173,19 +166,17 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert no snapshots exist selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 0) }) @@ -244,18 +235,15 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(cluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) // Update snapshot status truePtr := initialize.Bool(true) snapshot1.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - err = r.Client.Status().Update(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, snapshot1)) // Create second snapshot with different annotation value snapshot2 := &volumesnapshotv1.VolumeSnapshot{ @@ -279,38 +267,32 @@ func TestReconcileVolumeSnapshots(t *testing.T) { }, }, } - err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, r.apply(ctx, snapshot2)) // Update second snapshot's status snapshot2.Status = &volumesnapshotv1.VolumeSnapshotStatus{ ReadyToUse: truePtr, } - err = r.Client.Status().Update(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, snapshot2)) // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert first snapshot exists and second snapshot was deleted selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: 
selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) assert.Equal(t, snapshots.Items[0].Name, "first-snapshot") // Cleanup - err = r.deleteControlled(ctx, cluster, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.deleteControlled(ctx, cluster, snapshot1)) }) t.Run("SnapshotsEnabledCreateSnapshot", func(t *testing.T) { @@ -347,19 +329,17 @@ func TestReconcileVolumeSnapshots(t *testing.T) { } // Reconcile - err = r.reconcileVolumeSnapshots(ctx, cluster, pvc) - assert.NilError(t, err) + assert.NilError(t, r.reconcileVolumeSnapshots(ctx, cluster, pvc)) // Assert that a snapshot was created selectSnapshots, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) snapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, snapshots, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectSnapshots}, )) - assert.NilError(t, err) assert.Equal(t, len(snapshots.Items), 1) assert.Equal(t, snapshots.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], "another-backup-timestamp") @@ -413,21 +393,18 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { }, Spec: testVolumeClaimSpec(), } - err = errors.WithStack(r.setControllerReference(cluster, pvc)) - assert.NilError(t, err) - err = r.apply(ctx, pvc) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, pvc)) + assert.NilError(t, r.apply(ctx, pvc)) // Assert that the pvc was created selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) - assert.NilError(t, err) assert.Equal(t, len(pvcs.Items), 1) // Create volumes for reconcile @@ -471,12 +448,11 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { selectPvcs, err := naming.AsSelector(naming.Cluster(cluster.Name)) assert.NilError(t, err) pvcs := &corev1.PersistentVolumeClaimList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, pvcs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectPvcs}, )) - assert.NilError(t, err) assert.Equal(t, len(pvcs.Items), 1) }) @@ -494,18 +470,15 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) currentTime := metav1.Now() backupJob.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &currentTime, } - err = r.Client.Status().Update(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -521,12 +494,11 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t,
len(restoreJobs.Items), 1) assert.Assert(t, restoreJobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion] != "") }) @@ -549,34 +521,28 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) backupJob.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &earlierTime, } - err = r.Client.Status().Update(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) // Create successful restore job restoreJob := testRestoreJob(cluster) restoreJob.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } - err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, err) - err = r.apply(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, r.apply(ctx, restoreJob)) restoreJob.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &currentTime, } - err = r.Client.Status().Update(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) // Create instance set and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -592,12 +558,11 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { restoreJobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, restoreJobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(restoreJobs.Items), 0) // Assert pvc was annotated @@ -622,35 +587,29 @@ func TestReconcileDedicatedSnapshotVolume(t *testing.T) { // Create successful backup job backupJob := testBackupJob(cluster) - err = errors.WithStack(r.setControllerReference(cluster, backupJob)) - assert.NilError(t, err) - err = r.apply(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, backupJob)) + assert.NilError(t, r.apply(ctx, backupJob)) backupJob.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &earlierTime, } - err = r.Client.Status().Update(ctx, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, backupJob)) // Create failed restore job restoreJob := testRestoreJob(cluster) restoreJob.Annotations = map[string]string{ naming.PGBackRestBackupJobCompletion: backupJob.Status.CompletionTime.Format(time.RFC3339), } - err = errors.WithStack(r.setControllerReference(cluster, restoreJob)) - assert.NilError(t, err) - err = r.apply(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, restoreJob)) + assert.NilError(t, r.apply(ctx, restoreJob)) restoreJob.Status = batchv1.JobStatus{ Succeeded: 0, Failed: 1, CompletionTime: &currentTime, } - err = r.Client.Status().Update(ctx, restoreJob) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, restoreJob)) // Setup instances and volumes for reconcile sts := &appsv1.StatefulSet{} @@ -727,19 +686,17 @@ func TestDedicatedSnapshotVolumeRestore(t *testing.T) { backupJob :=
testBackupJob(cluster) backupJob.Status.CompletionTime = &currentTime - err := r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob) - assert.NilError(t, err) + assert.NilError(t, r.dedicatedSnapshotVolumeRestore(ctx, cluster, pvc, backupJob)) // Assert a restore job was created that has the correct annotation jobs := &batchv1.JobList{} selectJobs, err := naming.AsSelector(naming.ClusterRestoreJobs(cluster.Name)) assert.NilError(t, err) - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, jobs, client.InNamespace(cluster.Namespace), client.MatchingLabelsSelector{Selector: selectJobs}, )) - assert.NilError(t, err) assert.Equal(t, len(jobs.Items), 1) assert.Equal(t, jobs.Items[0].Annotations[naming.PGBackRestBackupJobCompletion], backupJob.Status.CompletionTime.Format(time.RFC3339)) @@ -851,8 +808,7 @@ func TestGetDedicatedSnapshotVolumeRestoreJob(t *testing.T) { job3.Name = "restore-job-3" job3.Namespace = ns.Name - err = r.apply(ctx, job3) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job3)) dsvRestoreJob, err := r.getDedicatedSnapshotVolumeRestoreJob(ctx, cluster) assert.NilError(t, err) @@ -864,7 +820,6 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { ctx := context.Background() _, cc := setupKubernetes(t) - // require.ParallelCapacity(t, 1) r := &Reconciler{ Client: cc, @@ -906,19 +861,16 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job2.Namespace = ns.Name job2.Name = "backup-job-2" - err = r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &currentTime, } - err = r.Client.Status().Update(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, job1)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -940,30 +892,25 @@ func TestGetLatestCompleteBackupJob(t *testing.T) { job2.Namespace = ns.Name job2.Name = "backup-job-2" - err = r.apply(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, job2)) // Get job1 and update Status. - err = r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job1), job1)) job1.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &currentTime, } - err = r.Client.Status().Update(ctx, job1) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, job1)) // Get job2 and update Status.
- err = r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2) - assert.NilError(t, err) + assert.NilError(t, r.Client.Get(ctx, client.ObjectKeyFromObject(job2), job2)) job2.Status = batchv1.JobStatus{ Succeeded: 1, CompletionTime: &earlierTime, } - err = r.Client.Status().Update(ctx, job2) - assert.NilError(t, err) + assert.NilError(t, r.Client.Status().Update(ctx, job2)) latestCompleteBackupJob, err := r.getLatestCompleteBackupJob(ctx, cluster) assert.NilError(t, err) @@ -1113,8 +1060,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot.Spec.Source.PersistentVolumeClaimName = initialize.String("some-pvc-name") snapshot.Spec.VolumeSnapshotClassName = initialize.String("some-class-name") - err := r.apply(ctx, snapshot) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1155,8 +1101,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1198,8 +1143,7 @@ func TestGetSnapshotsForCluster(t *testing.T) { } snapshot2.Spec.Source.PersistentVolumeClaimName = initialize.String("another-pvc-name") snapshot2.Spec.VolumeSnapshotClassName = initialize.String("another-class-name") - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshots, err := r.getSnapshotsForCluster(ctx, cluster) assert.NilError(t, err) @@ -1359,24 +1303,20 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ *snapshot1, }, } - err = r.deleteSnapshots(ctx, cluster, snapshotList) - assert.NilError(t, err) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) - assert.NilError(t, err) assert.Equal(t, len(existingSnapshots.Items), 1) }) @@ -1397,10 +1337,8 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err := errors.WithStack(r.setControllerReference(rhinoCluster, snapshot1)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot1) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(rhinoCluster, snapshot1)) + assert.NilError(t, r.apply(ctx, snapshot1)) snapshot2 := &volumesnapshotv1.VolumeSnapshot{ TypeMeta: metav1.TypeMeta{ @@ -1417,24 +1355,20 @@ func TestDeleteSnapshots(t *testing.T) { }, }, } - err = errors.WithStack(r.setControllerReference(cluster, snapshot2)) - assert.NilError(t, err) - err = r.apply(ctx, snapshot2) - assert.NilError(t, err) + assert.NilError(t, r.setControllerReference(cluster, snapshot2)) + assert.NilError(t, r.apply(ctx, snapshot2)) snapshotList := &volumesnapshotv1.VolumeSnapshotList{ Items: []volumesnapshotv1.VolumeSnapshot{ *snapshot1, *snapshot2, }, } - err = r.deleteSnapshots(ctx, cluster, 
snapshotList) - assert.NilError(t, err) + assert.NilError(t, r.deleteSnapshots(ctx, cluster, snapshotList)) existingSnapshots := &volumesnapshotv1.VolumeSnapshotList{} - err = errors.WithStack( + assert.NilError(t, r.Client.List(ctx, existingSnapshots, client.InNamespace(ns.Namespace), )) - assert.NilError(t, err) assert.Equal(t, len(existingSnapshots.Items), 1) assert.Equal(t, existingSnapshots.Items[0].Name, "first-snapshot") }) diff --git a/internal/controller/postgrescluster/suite_test.go b/internal/controller/postgrescluster/suite_test.go index 2a0e3d76ec..0b9736614a 100644 --- a/internal/controller/postgrescluster/suite_test.go +++ b/internal/controller/postgrescluster/suite_test.go @@ -7,7 +7,6 @@ package postgrescluster import ( "context" "os" - "path/filepath" "strings" "testing" @@ -20,19 +19,17 @@ import ( _ "k8s.io/client-go/plugin/pkg/client/auth/gcp" "k8s.io/client-go/rest" "sigs.k8s.io/controller-runtime/pkg/client" - "sigs.k8s.io/controller-runtime/pkg/envtest" "sigs.k8s.io/controller-runtime/pkg/log" "sigs.k8s.io/controller-runtime/pkg/manager" - "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/logging" + "github.com/crunchydata/postgres-operator/internal/testing/require" ) var suite struct { Client client.Client Config *rest.Config - Environment *envtest.Environment ServerVersion *version.Version Manager manager.Manager @@ -53,21 +50,7 @@ var _ = BeforeSuite(func() { log.SetLogger(logging.FromContext(context.Background())) By("bootstrapping test environment") - suite.Environment = &envtest.Environment{ - CRDDirectoryPaths: []string{ - filepath.Join("..", "..", "..", "config", "crd", "bases"), - filepath.Join("..", "..", "..", "hack", "tools", "external-snapshotter", "client", "config", "crd"), - }, - } - - _, err := suite.Environment.Start() - Expect(err).ToNot(HaveOccurred()) - - DeferCleanup(suite.Environment.Stop) - - suite.Config = suite.Environment.Config - suite.Client, err = client.New(suite.Config, client.Options{Scheme: runtime.Scheme}) - Expect(err).ToNot(HaveOccurred()) + suite.Config, suite.Client = require.Kubernetes2(GinkgoT()) dc, err := discovery.NewDiscoveryClientForConfig(suite.Config) Expect(err).ToNot(HaveOccurred()) diff --git a/internal/controller/runtime/conversion.go b/internal/controller/runtime/conversion.go new file mode 100644 index 0000000000..aa8e272c14 --- /dev/null +++ b/internal/controller/runtime/conversion.go @@ -0,0 +1,73 @@ +// Copyright 2021 - 2024 Crunchy Data Solutions, Inc. +// +// SPDX-License-Identifier: Apache-2.0 + +package runtime + +import ( + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/controller-runtime/pkg/client" +) + +type ( + GR = schema.GroupResource + GV = schema.GroupVersion + GVK = schema.GroupVersionKind + GVR = schema.GroupVersionResource +) + +// These functions call the [runtime.DefaultUnstructuredConverter] with some additional type safety. +// An [unstructured.Unstructured] should always be paired with a [client.Object], and +// an [unstructured.UnstructuredList] should always be paired with a [client.ObjectList]. + +// FromUnstructuredList returns a copy of list by marshaling through JSON. +func FromUnstructuredList[ + // *T implements [client.ObjectList] + T any, PT interface { + client.ObjectList + *T + }, +](list *unstructured.UnstructuredList) (*T, error) { + result := new(T) + return result, runtime. + DefaultUnstructuredConverter. 
diff --git a/internal/controller/runtime/conversion.go b/internal/controller/runtime/conversion.go
new file mode 100644
index 0000000000..aa8e272c14
--- /dev/null
+++ b/internal/controller/runtime/conversion.go
@@ -0,0 +1,73 @@
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package runtime
+
+import (
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+type (
+	GR  = schema.GroupResource
+	GV  = schema.GroupVersion
+	GVK = schema.GroupVersionKind
+	GVR = schema.GroupVersionResource
+)
+
+// These functions call the [runtime.DefaultUnstructuredConverter] with some additional type safety.
+// An [unstructured.Unstructured] should always be paired with a [client.Object], and
+// an [unstructured.UnstructuredList] should always be paired with a [client.ObjectList].
+
+// FromUnstructuredList returns a copy of list by marshaling through JSON.
+func FromUnstructuredList[
+	// *T implements [client.ObjectList]
+	T any, PT interface {
+		client.ObjectList
+		*T
+	},
+](list *unstructured.UnstructuredList) (*T, error) {
+	result := new(T)
+	return result, runtime.
+		DefaultUnstructuredConverter.
+		FromUnstructured(list.UnstructuredContent(), result)
+}
+
+// FromUnstructuredObject returns a copy of object by marshaling through JSON.
+func FromUnstructuredObject[
+	// *T implements [client.Object]
+	T any, PT interface {
+		client.Object
+		*T
+	},
+](object *unstructured.Unstructured) (*T, error) {
+	result := new(T)
+	return result, runtime.
+		DefaultUnstructuredConverter.
+		FromUnstructured(object.UnstructuredContent(), result)
+}
+
+// ToUnstructuredList returns a copy of list by marshaling through JSON.
+func ToUnstructuredList(list client.ObjectList) (*unstructured.UnstructuredList, error) {
+	content, err := runtime.
+		DefaultUnstructuredConverter.
+		ToUnstructured(list)
+
+	result := new(unstructured.UnstructuredList)
+	result.SetUnstructuredContent(content)
+	return result, err
+}
+
+// ToUnstructuredObject returns a copy of object by marshaling through JSON.
+func ToUnstructuredObject(object client.Object) (*unstructured.Unstructured, error) {
+	content, err := runtime.
+		DefaultUnstructuredConverter.
+		ToUnstructured(object)
+
+	result := new(unstructured.Unstructured)
+	result.SetUnstructuredContent(content)
+	return result, err
+}
diff --git a/internal/controller/runtime/conversion_test.go b/internal/controller/runtime/conversion_test.go
new file mode 100644
index 0000000000..a80d59fad8
--- /dev/null
+++ b/internal/controller/runtime/conversion_test.go
@@ -0,0 +1,46 @@
+// Copyright 2021 - 2024 Crunchy Data Solutions, Inc.
+//
+// SPDX-License-Identifier: Apache-2.0
+
+package runtime_test
+
+import (
+	"testing"
+
+	"gotest.tools/v3/assert"
+	corev1 "k8s.io/api/core/v1"
+
+	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
+)
+
+func TestConvertUnstructured(t *testing.T) {
+	var cm corev1.ConfigMap
+	cm.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMap"))
+	cm.Namespace = "one"
+	cm.Name = "two"
+	cm.Data = map[string]string{"w": "x", "y": "z"}
+
+	t.Run("List", func(t *testing.T) {
+		original := new(corev1.ConfigMapList)
+		original.SetGroupVersionKind(corev1.SchemeGroupVersion.WithKind("ConfigMapList"))
+		original.Items = []corev1.ConfigMap{*cm.DeepCopy()}
+
+		list, err := runtime.ToUnstructuredList(original)
+		assert.NilError(t, err)
+
+		converted, err := runtime.FromUnstructuredList[corev1.ConfigMapList](list)
+		assert.NilError(t, err)
+		assert.DeepEqual(t, original, converted)
+	})
+
+	t.Run("Object", func(t *testing.T) {
+		original := cm.DeepCopy()
+
+		object, err := runtime.ToUnstructuredObject(original)
+		assert.NilError(t, err)
+
+		converted, err := runtime.FromUnstructuredObject[corev1.ConfigMap](object)
+		assert.NilError(t, err)
+		assert.DeepEqual(t, original, converted)
+	})
+}
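The unexported pointer type parameter PT in FromUnstructuredList and FromUnstructuredObject is what provides the advertised type safety: callers name only the struct type, and the compiler verifies that its pointer implements the client interface. A hypothetical caller, assuming the unstructured value came from a dynamic client:

package example

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"

	"github.com/crunchydata/postgres-operator/internal/controller/runtime"
)

// toSecret converts a dynamic-client result into a typed Secret.
func toSecret(u *unstructured.Unstructured) (*corev1.Secret, error) {
	// T is corev1.Secret; PT is inferred as *corev1.Secret, which satisfies client.Object.
	return runtime.FromUnstructuredObject[corev1.Secret](u)
	// runtime.FromUnstructuredObject[int](u) would not compile:
	// *int does not implement client.Object.
}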
diff --git a/internal/controller/standalone_pgadmin/users_test.go b/internal/controller/standalone_pgadmin/users_test.go
index 4a600424b4..1188722cf3 100644
--- a/internal/controller/standalone_pgadmin/users_test.go
+++ b/internal/controller/standalone_pgadmin/users_test.go
@@ -7,12 +7,12 @@ package standalone_pgadmin
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"strings"
 	"testing"

-	"github.com/pkg/errors"
 	"gotest.tools/v3/assert"
 	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@@ -310,8 +310,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, calls, 1, "PodExec should be called once")

 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -370,8 +370,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once")

 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -442,8 +442,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, updateUserCalls, 1, "The update-user command should be executed once")

 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -487,8 +487,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, calls, 0, "PodExec should be called zero times")

 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -529,8 +529,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -556,8 +556,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		assert.Equal(t, calls, 2, "PodExec should be called once more")

 		// User in users.json should be unchanged
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -609,8 +609,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
 		secret := &corev1.Secret{ObjectMeta: naming.StandalonePGAdmin(pgadmin)}
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -637,8 +637,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -665,8 +665,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
@@ -694,8 +694,8 @@ func TestWritePGAdminUsers(t *testing.T) {
 		// User in users.json should be unchanged and attempt to add user should not
 		// have succeeded
-		assert.NilError(t, errors.WithStack(
-			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)))
+		assert.NilError(t,
+			reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))
 		if assert.Check(t, secret.Data["users.json"] != nil) {
 			var usersArr []pgAdminUserForJson
 			assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr))
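Every hunk above follows the same shape: the errors.WithStack wrapper captured a stack trace that assert.NilError never prints, so the client call's result is now asserted directly and the standard library covers the tests' remaining error handling. Roughly, under hypothetical names:

package example

import (
	"errors"
	"fmt"
)

// describeGet shows that stdlib wrapping and unwrapping are enough once no
// stack trace is wanted; github.com/pkg/errors only pays off when the frames
// themselves are inspected or logged.
func describeGet(err error) error {
	if err == nil {
		return nil
	}
	wrapped := fmt.Errorf("getting users secret: %w", err)
	_ = errors.Unwrap(wrapped) // returns err; errors.Is and errors.As work too
	return wrapped
}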
secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -637,8 +637,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -665,8 +665,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) @@ -694,8 +694,8 @@ func TestWritePGAdminUsers(t *testing.T) { // User in users.json should be unchanged and attempt to add user should not // have succeeded - assert.NilError(t, errors.WithStack( - reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret))) + assert.NilError(t, + reconciler.Client.Get(ctx, client.ObjectKeyFromObject(secret), secret)) if assert.Check(t, secret.Data["users.json"] != nil) { var usersArr []pgAdminUserForJson assert.NilError(t, json.Unmarshal(secret.Data["users.json"], &usersArr)) diff --git a/internal/controller/standalone_pgadmin/volume_test.go b/internal/controller/standalone_pgadmin/volume_test.go index 645c228277..530a0519ba 100644 --- a/internal/controller/standalone_pgadmin/volume_test.go +++ b/internal/controller/standalone_pgadmin/volume_test.go @@ -6,6 +6,7 @@ package standalone_pgadmin import ( "context" + "errors" "testing" "gotest.tools/v3/assert" @@ -16,8 +17,6 @@ import ( "k8s.io/apimachinery/pkg/util/validation/field" "sigs.k8s.io/controller-runtime/pkg/client" - "github.com/pkg/errors" - "github.com/crunchydata/postgres-operator/internal/controller/runtime" "github.com/crunchydata/postgres-operator/internal/initialize" "github.com/crunchydata/postgres-operator/internal/naming" diff --git a/internal/logging/logrus_test.go b/internal/logging/logrus_test.go index 3e73193d1a..1bbf9efc29 100644 --- a/internal/logging/logrus_test.go +++ b/internal/logging/logrus_test.go @@ -12,7 +12,7 @@ import ( "testing" "github.com/google/go-cmp/cmp" - "github.com/pkg/errors" + "github.com/pkg/errors" //nolint:depguard // This is testing the logging of stack frames. 
"gotest.tools/v3/assert" ) diff --git a/internal/testing/require/kubernetes.go b/internal/testing/require/kubernetes.go index df21bca058..51588342aa 100644 --- a/internal/testing/require/kubernetes.go +++ b/internal/testing/require/kubernetes.go @@ -11,8 +11,8 @@ import ( goruntime "runtime" "strings" "sync" - "testing" + "golang.org/x/tools/go/packages" "gotest.tools/v3/assert" corev1 "k8s.io/api/core/v1" "k8s.io/client-go/rest" @@ -22,6 +22,14 @@ import ( "github.com/crunchydata/postgres-operator/internal/controller/runtime" ) +type TestingT interface { + assert.TestingT + Cleanup(func()) + Helper() + Name() string + SkipNow() +} + // https://pkg.go.dev/sigs.k8s.io/controller-runtime/pkg/envtest#pkg-constants var envtestVarsSet = os.Getenv("KUBEBUILDER_ASSETS") != "" || strings.EqualFold(os.Getenv("USE_EXISTING_CLUSTER"), "true") @@ -29,7 +37,7 @@ var envtestVarsSet = os.Getenv("KUBEBUILDER_ASSETS") != "" || // EnvTest returns an unstarted Environment with crds. It calls t.Skip when // the "KUBEBUILDER_ASSETS" and "USE_EXISTING_CLUSTER" environment variables // are unset. -func EnvTest(t testing.TB, crds envtest.CRDInstallOptions) *envtest.Environment { +func EnvTest(t TestingT, crds envtest.CRDInstallOptions) *envtest.Environment { t.Helper() if !envtestVarsSet { @@ -59,7 +67,7 @@ var kubernetes struct { // // Tests that call t.Parallel might share the same local API. Call t.Parallel after this // function to ensure they share. -func Kubernetes(t testing.TB) client.Client { +func Kubernetes(t TestingT) client.Client { t.Helper() _, cc := kubernetes3(t) return cc @@ -67,13 +75,13 @@ func Kubernetes(t testing.TB) client.Client { // Kubernetes2 is the same as [Kubernetes] but also returns a copy of the client // configuration. -func Kubernetes2(t testing.TB) (*rest.Config, client.Client) { +func Kubernetes2(t TestingT) (*rest.Config, client.Client) { t.Helper() env, cc := kubernetes3(t) return rest.CopyConfig(env.Config), cc } -func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { +func kubernetes3(t TestingT) (*envtest.Environment, client.Client) { t.Helper() if !envtestVarsSet { @@ -102,6 +110,18 @@ func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { base, err := filepath.Rel(filepath.Dir(caller), root) assert.NilError(t, err) + // Calculate the snapshotter module directory path relative to the project directory. + var snapshotter string + if pkgs, err := packages.Load( + &packages.Config{Mode: packages.NeedModule}, + "github.com/kubernetes-csi/external-snapshotter/client/v8/apis/volumesnapshot/v1", + ); assert.Check(t, + err == nil && len(pkgs) > 0 && pkgs[0].Module != nil, "got %v\n%#v", err, pkgs, + ) { + snapshotter, err = filepath.Rel(root, pkgs[0].Module.Dir) + assert.NilError(t, err) + } + kubernetes.Lock() defer kubernetes.Unlock() @@ -110,7 +130,7 @@ func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { ErrorIfPathMissing: true, Paths: []string{ filepath.Join(base, "config", "crd", "bases"), - filepath.Join(base, "hack", "tools", "external-snapshotter", "client", "config", "crd"), + filepath.Join(base, snapshotter, "config", "crd"), }, Scheme: runtime.Scheme, }) @@ -145,7 +165,7 @@ func kubernetes3(t testing.TB) (*envtest.Environment, client.Client) { // Namespace creates a random namespace that is deleted by t.Cleanup. It calls // t.Fatal when creation fails. The caller may delete the namespace at any time. 
diff --git a/internal/upgradecheck/header_test.go b/internal/upgradecheck/header_test.go
index 9deb99d757..63c8d4b99c 100644
--- a/internal/upgradecheck/header_test.go
+++ b/internal/upgradecheck/header_test.go
@@ -32,7 +32,6 @@ func TestGenerateHeader(t *testing.T) {
 	setupDeploymentID(t)
 	ctx := context.Background()
 	cfg, cc := require.Kubernetes2(t)
-	setupNamespace(t, cc)

 	dc, err := discovery.NewDiscoveryClientForConfig(cfg)
 	assert.NilError(t, err)
@@ -43,6 +42,7 @@ func TestGenerateHeader(t *testing.T) {

 	t.Setenv("PGO_INSTALLER", "test")
 	t.Setenv("PGO_INSTALLER_ORIGIN", "test-origin")
+	t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name)
 	t.Setenv("BUILD_SOURCE", "developer")

 	t.Run("error ensuring ID", func(t *testing.T) {
@@ -146,7 +146,7 @@ func TestGenerateHeader(t *testing.T) {
 func TestEnsureID(t *testing.T) {
 	ctx := context.Background()
 	cc := require.Kubernetes(t)
-	setupNamespace(t, cc)
+	t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name)

 	t.Run("success, no id set in mem or configmap", func(t *testing.T) {
 		deploymentID = ""
@@ -282,7 +282,7 @@ func TestEnsureID(t *testing.T) {
 func TestManageUpgradeCheckConfigMap(t *testing.T) {
 	ctx := context.Background()
 	cc := require.Kubernetes(t)
-	setupNamespace(t, cc)
+	t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name)

 	t.Run("no namespace given", func(t *testing.T) {
 		ctx, calls := setupLogCapture(ctx)
@@ -408,7 +408,7 @@ func TestManageUpgradeCheckConfigMap(t *testing.T) {
 func TestApplyConfigMap(t *testing.T) {
 	ctx := context.Background()
 	cc := require.Kubernetes(t)
-	setupNamespace(t, cc)
+	t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name)

 	t.Run("successful create", func(t *testing.T) {
 		cmRetrieved := &corev1.ConfigMap{}
diff --git a/internal/upgradecheck/helpers_test.go b/internal/upgradecheck/helpers_test.go
index 63184184db..abef591e5f 100644
--- a/internal/upgradecheck/helpers_test.go
+++ b/internal/upgradecheck/helpers_test.go
@@ -13,8 +13,6 @@ import (
 	"testing"

 	"github.com/go-logr/logr/funcr"
-	"gotest.tools/v3/assert"
-	corev1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/types"
 	"k8s.io/apimachinery/pkg/util/uuid"
@@ -154,26 +152,3 @@ func setupLogCapture(ctx context.Context) (context.Context, *[]string) {
 	})
 	return logging.NewContext(ctx, testlog), &calls
 }
-
-// setupNamespace creates a namespace that will be deleted by t.Cleanup.
-// For upgradechecking, this namespace is set to `postgres-operator`,
-// which sometimes is created by other parts of the testing apparatus,
-// cf., the createnamespace call in `make check-envtest-existing`.
-// When creation fails, it calls t.Fatal. The caller may delete the namespace
-// at any time.
-func setupNamespace(t testing.TB, cc crclient.Client) {
-	t.Helper()
-	ns := &corev1.Namespace{}
-	ns.Name = "postgres-operator"
-	ns.Labels = map[string]string{"postgres-operator-test": t.Name()}
-
-	ctx := context.Background()
-	exists := &corev1.Namespace{}
-	assert.NilError(t, crclient.IgnoreNotFound(
-		cc.Get(ctx, crclient.ObjectKeyFromObject(ns), exists)))
-	if exists.Name != "" {
-		return
-	}
-	assert.NilError(t, cc.Create(ctx, ns))
-	t.Cleanup(func() { assert.Check(t, crclient.IgnoreNotFound(cc.Delete(ctx, ns))) })
-}
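With setupNamespace gone, the upgradecheck tests no longer share the fixed "postgres-operator" namespace; each test points PGO_NAMESPACE at a fresh random namespace, so concurrent runs cannot collide. A sketch of the replacement pattern as applied in header_test.go above; the test name is hypothetical:

package upgradecheck_example_test

import (
	"testing"

	"github.com/crunchydata/postgres-operator/internal/testing/require"
)

func TestWithRandomNamespace(t *testing.T) {
	cc := require.Kubernetes(t) // skips unless the envtest variables are set

	// require.Namespace registers deletion via t.Cleanup, and t.Setenv
	// restores the variable when the test finishes.
	t.Setenv("PGO_NAMESPACE", require.Namespace(t, cc).Name)

	// ...code under test reads PGO_NAMESPACE and works in that namespace...
}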