diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 424c8e89a..512f6a6c9 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -361,6 +361,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
 	}
 	if !reflect.DeepEqual(c.Statefulset.Annotations, statefulSet.Annotations) {
 		match = false
+		needsReplace = true
 		reasons = append(reasons, "new statefulset's annotations do not match the current one")
 	}
 
@@ -598,7 +599,7 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 // for a cluster that had no such job before. In this case a missing job is not an error.
 func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 	updateFailed := false
-	syncStatetfulSet := false
+	syncStatefulSet := false
 
 	c.mu.Lock()
 	defer c.mu.Unlock()
@@ -619,7 +620,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		if IsBiggerPostgresVersion(oldSpec.Spec.PostgresqlParam.PgVersion, c.GetDesiredMajorVersion()) {
 			c.logger.Infof("postgresql version increased (%s -> %s), depending on config manual upgrade needed",
 				oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
-			syncStatetfulSet = true
+			syncStatefulSet = true
 		} else {
 			c.logger.Infof("postgresql major version unchanged or smaller, no changes needed")
 			// sticking with old version, this will also advance GetDesiredVersion next time.
@@ -688,9 +689,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 			updateFailed = true
 			return
 		}
-		if syncStatetfulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) {
+		if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) || !reflect.DeepEqual(oldSpec.Annotations, newSpec.Annotations) {
 			c.logger.Debugf("syncing statefulsets")
-			syncStatetfulSet = false
+			syncStatefulSet = false
 			// TODO: avoid generating the StatefulSet object twice by passing it to syncStatefulSet
 			if err := c.syncStatefulSet(); err != nil {
 				c.logger.Errorf("could not sync statefulsets: %v", err)
diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go
index ed8b4099c..48b17f532 100644
--- a/pkg/cluster/resources.go
+++ b/pkg/cluster/resources.go
@@ -147,25 +147,6 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
 	return nil
 }
 
-func (c *Cluster) updateStatefulSetAnnotations(annotations map[string]string) (*appsv1.StatefulSet, error) {
-	c.logger.Debugf("patching statefulset annotations")
-	patchData, err := metaAnnotationsPatch(annotations)
-	if err != nil {
-		return nil, fmt.Errorf("could not form patch for the statefulset metadata: %v", err)
-	}
-	result, err := c.KubeClient.StatefulSets(c.Statefulset.Namespace).Patch(
-		context.TODO(),
-		c.Statefulset.Name,
-		types.MergePatchType,
-		[]byte(patchData),
-		metav1.PatchOptions{},
-		"")
-	if err != nil {
-		return nil, fmt.Errorf("could not patch statefulset annotations %q: %v", patchData, err)
-	}
-	return result, nil
-}
-
 func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
 	c.setProcessName("updating statefulset")
 	if c.Statefulset == nil {
@@ -197,13 +178,6 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
 		return fmt.Errorf("could not patch statefulset spec %q: %v", statefulSetName, err)
 	}
 
-	if newStatefulSet.Annotations != nil {
-		statefulSet, err = c.updateStatefulSetAnnotations(newStatefulSet.Annotations)
-		if err != nil {
-			return err
-		}
-	}
-
 	c.Statefulset = statefulSet
 
 	return nil
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index c82e528fd..3f08cfb4d 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -373,8 +373,6 @@ func (c *Cluster) syncStatefulSet() error {
 		}
 	}
 
-	c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations)))
-
 	if len(podsToRecreate) == 0 && !c.OpConfig.EnableLazySpiloUpgrade {
 		// even if the desired and the running statefulsets match
 		// there still may be not up-to-date pods on condition
diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go
new file mode 100644
index 000000000..e6f23914b
--- /dev/null
+++ b/pkg/cluster/sync_test.go
@@ -0,0 +1,115 @@
+package cluster
+
+import (
+	"context"
+	"testing"
+	"time"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+
+	"github.com/stretchr/testify/assert"
+	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
+	"github.com/zalando/postgres-operator/pkg/util/config"
+	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
+	"k8s.io/client-go/kubernetes/fake"
+)
+
+func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	acidClientSet := fakeacidv1.NewSimpleClientset()
+	clientSet := fake.NewSimpleClientset()
+
+	return k8sutil.KubernetesClient{
+		PodsGetter:         clientSet.CoreV1(),
+		PostgresqlsGetter:  acidClientSet.AcidV1(),
+		StatefulSetsGetter: clientSet.AppsV1(),
+	}, clientSet
+}
+
+func TestSyncStatefulSetsAnnotations(t *testing.T) {
+	testName := "test syncing statefulsets annotations"
+	client, _ := newFakeK8sSyncClient()
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	inheritedAnnotation := "environment"
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:        clusterName,
+			Namespace:   namespace,
+			Annotations: map[string]string{inheritedAnnotation: "test"},
+		},
+		Spec: acidv1.PostgresSpec{
+			Volume: acidv1.Volume{
+				Size: "1Gi",
+			},
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				Resources: config.Resources{
+					ClusterLabels:         map[string]string{"application": "spilo"},
+					ClusterNameLabel:      "cluster-name",
+					DefaultCPURequest:     "300m",
+					DefaultCPULimit:       "300m",
+					DefaultMemoryRequest:  "300Mi",
+					DefaultMemoryLimit:    "300Mi",
+					InheritedAnnotations:  []string{inheritedAnnotation},
+					PodRoleLabel:          "spilo-role",
+					ResourceCheckInterval: time.Duration(3),
+					ResourceCheckTimeout:  time.Duration(10),
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+
+	// create the running statefulset from the Postgresql spec
+	_, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
+
+	// patch the statefulset to carry an extra annotation the desired state does not have
+	patchData, err := metaAnnotationsPatch(map[string]string{"test-anno": "true"})
+	assert.NoError(t, err)
+
+	newSts, err := cluster.KubeClient.StatefulSets(namespace).Patch(
+		context.TODO(),
+		clusterName,
+		types.MergePatchType,
+		[]byte(patchData),
+		metav1.PatchOptions{},
+		"")
+	assert.NoError(t, err)
+
+	cluster.Statefulset = newSts
+
+	// first compare the running statefulset with the desired one - they should
+	// not match because of the annotation that was just patched in
+	desiredSts, err := cluster.generateStatefulSet(&cluster.Postgresql.Spec)
+	assert.NoError(t, err)
+
+	cmp := cluster.compareStatefulSetWith(desiredSts)
+	if cmp.match {
+		t.Errorf("%s: current and desired statefulsets match despite differences: %#v", testName, cmp)
+	}
+
+	// now sync the statefulset - the annotation diff will trigger a replacement
+	err = cluster.syncStatefulSet()
+	assert.NoError(t, err)
+
+	// compare again after the sync - running and desired statefulsets must now match
+	cmp = cluster.compareStatefulSetWith(desiredSts)
+	if !cmp.match {
+		t.Errorf("%s: current and desired statefulsets do not match: %#v", testName, cmp)
+	}
+
+	// check that the inherited annotation is present in the desired statefulset
+	if _, exists := desiredSts.Annotations[inheritedAnnotation]; !exists {
+		t.Errorf("%s: inherited annotation not found in desired statefulset: %#v", testName, desiredSts.Annotations)
+	}
+}