diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 0e0efd6c1..a19d3cfd5 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -166,6 +166,10 @@ spec: type: string template: type: boolean + inherited_annotations: + type: array + items: + type: string inherited_labels: type: array items: diff --git a/charts/postgres-operator/values-crd.yaml b/charts/postgres-operator/values-crd.yaml index 21292a13e..e553c9d6f 100644 --- a/charts/postgres-operator/values-crd.yaml +++ b/charts/postgres-operator/values-crd.yaml @@ -91,7 +91,11 @@ configKubernetes: # namespaced name of the secret containing infrastructure roles names and passwords # infrastructure_roles_secret_name: postgresql-infrastructure-roles - # list of labels that can be inherited from the cluster manifest + # list of annotation keys that can be inherited from the cluster manifest + # inherited_annotations: + # - owned-by + + # list of label keys that can be inherited from the cluster manifest # inherited_labels: # - application # - environment diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 8a7776c54..2f68c9d4b 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -88,7 +88,10 @@ configKubernetes: # namespaced name of the secret containing infrastructure roles names and passwords # infrastructure_roles_secret_name: postgresql-infrastructure-roles - # list of labels that can be inherited from the cluster manifest + # list of annotation keys that can be inherited from the cluster manifest + # inherited_annotations: owned-by + + # list of label keys that can be inherited from the cluster manifest # inherited_labels: application,environment # timeout for successful migration of master pods from unschedulable node diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 63903cb81..87841d1d5 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -274,6 +274,12 @@ configuration they are grouped under the `kubernetes` key. are extracted. For the ConfigMap this has to be a string which allows referencing only one infrastructure roles secret. The default is empty. +* **inherited_annotations** + list of annotation keys that can be inherited from the cluster manifest, and + added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PDB` and + `Services`) created by the operator, including the ones from the connection + pooler deployment. The default is empty. + * **pod_role_label** name of the label assigned to the Postgres pods (and services/endpoints) by the operator. The default is `spilo-role`. @@ -283,15 +289,16 @@ configuration they are grouped under the `kubernetes` key. objects. The default is `application:spilo`. * **inherited_labels** - list of labels that can be inherited from the cluster manifest, and added to - each child objects (`StatefulSet`, `Pod`, `Service` and `Endpoints`) created - by the operator. Typical use case is to dynamically pass labels that are - specific to a given Postgres cluster, in order to implement `NetworkPolicy`. - The default is empty. + list of label keys that can be inherited from the cluster manifest, and + added to each child object (`Deployment`, `StatefulSet`, `Pod`, `PVCs`, + `PDB`, `Service`, `Endpoints` and `Secrets`) created by the operator.
+ Typical use case is to dynamically pass labels that are specific to a + given Postgres cluster, in order to implement `NetworkPolicy`. The default + is empty. * **cluster_name_label** - name of the label assigned to Kubernetes objects created by the operator that - indicates which cluster a given object belongs to. The default is + name of the label assigned to Kubernetes objects created by the operator + that indicates which cluster a given object belongs to. The default is `cluster-name`. * **node_readiness_label** diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 30165e6a0..95e1dc9ad 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -117,7 +117,7 @@ def check_service_annotations(self, svc_labels, annotations, namespace='default' for svc in svcs: for key, value in annotations.items(): if not svc.metadata.annotations or key not in svc.metadata.annotations or svc.metadata.annotations[key] != value: - print("Expected key {} not found in annotations {}".format(key, svc.metadata.annotations)) + print("Expected key {} not found in service annotations {}".format(key, svc.metadata.annotations)) return False return True @@ -126,7 +126,7 @@ def check_statefulset_annotations(self, sset_labels, annotations, namespace='def for sset in ssets: for key, value in annotations.items(): if key not in sset.metadata.annotations or sset.metadata.annotations[key] != value: - print("Expected key {} not found in annotations {}".format(key, sset.metadata.annotations)) + print("Expected key {} not found in statefulset annotations {}".format(key, sset.metadata.annotations)) return False return True diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index b98d0d956..d396da01b 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -852,6 +852,7 @@ def test_statefulset_annotation_propagation(self): patch_sset_propagate_annotations = { "data": { "downscaler_annotations": "deployment-time,downscaler/*", + "inherited_annotations": "owned-by", } } k8s.update_config(patch_sset_propagate_annotations) @@ -861,6 +862,7 @@ def test_statefulset_annotation_propagation(self): "annotations": { "deployment-time": "2020-04-30 12:00:00", "downscaler/downtime_replicas": "0", + "owned-by": "acid", }, } } @@ -870,10 +872,9 @@ def test_statefulset_annotation_propagation(self): annotations = { "deployment-time": "2020-04-30 12:00:00", "downscaler/downtime_replicas": "0", + "owned-by": "acid", } - - self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing") - + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing") @timeout_decorator.timeout(TEST_TIMEOUT_SEC) diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 7b99f4f45..111701829 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -57,6 +57,7 @@ data: # kubernetes_use_configmaps: "false" # infrastructure_roles_secret_name: "postgresql-infrastructure-roles" # infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole" + # inherited_annotations: owned-by # inherited_labels: application,environment # kube_iam_role: "" # log_s3_bucket: "" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index f1270d136..8fbc6f042 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ 
b/manifests/operatorconfiguration.crd.yaml @@ -164,6 +164,10 @@ spec: type: string template: type: boolean + inherited_annotations: + type: array + items: + type: string inherited_labels: type: array items: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index fdfe09096..00b095c1b 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -49,6 +49,8 @@ configuration: # - secretname: "other-infrastructure-role" # userkey: "other-user-key" # passwordkey: "other-password-key" + # inherited_annotations: + # - owned-by # inherited_labels: # - application # - environment diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 8bdf0cd1f..938abf7fc 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -961,6 +961,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, }, + "inherited_annotations": { + Type: "array", + Items: &apiextv1.JSONSchemaPropsOrArray{ + Schema: &apiextv1.JSONSchemaProps{ + Type: "string", + }, + }, + }, "inherited_labels": { Type: "array", Items: &apiextv1.JSONSchemaPropsOrArray{ @@ -1407,7 +1415,7 @@ func buildCRD(name, kind, plural, short string, columns []apiextv1.CustomResourc }, Scope: apiextv1.NamespaceScoped, Versions: []apiextv1.CustomResourceDefinitionVersion{ - apiextv1.CustomResourceDefinitionVersion{ + { Name: SchemeGroupVersion.Version, Served: true, Storage: true, diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index 6c7c7767b..e79405224 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -66,6 +66,7 @@ type KubernetesMetaConfiguration struct { PodRoleLabel string `json:"pod_role_label,omitempty"` ClusterLabels map[string]string `json:"cluster_labels,omitempty"` InheritedLabels []string `json:"inherited_labels,omitempty"` + InheritedAnnotations []string `json:"inherited_annotations,omitempty"` DownscalerAnnotations []string `json:"downscaler_annotations,omitempty"` ClusterNameLabel string `json:"cluster_name_label,omitempty"` DeleteAnnotationDateKey string `json:"delete_annotation_date_key,omitempty"` diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 51d9861e4..f04a29490 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -202,6 +202,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura *out = make([]string, len(*in)) copy(*out, *in) } + if in.InheritedAnnotations != nil { + in, out := &in.InheritedAnnotations, &out.InheritedAnnotations + *out = make([]string, len(*in)) + copy(*out, *in) + } if in.DownscalerAnnotations != nil { in, out := &in.DownscalerAnnotations, &out.DownscalerAnnotations *out = make([]string, len(*in)) diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 36c75bd91..82b855bf2 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -286,7 +286,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( ObjectMeta: metav1.ObjectMeta{ Labels: c.connectionPoolerLabels(role, true).MatchLabels, Namespace: c.Namespace, - Annotations: c.generatePodAnnotations(spec), + 
Annotations: c.annotationsSet(c.generatePodAnnotations(spec)), }, Spec: v1.PodSpec{ ServiceAccountName: c.OpConfig.PodServiceAccountName, @@ -325,7 +325,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio if *numberOfInstances < constants.ConnectionPoolerMinInstances { msg := "Adjusted number of connection pooler instances from %d to %d" - c.logger.Warningf(msg, numberOfInstances, constants.ConnectionPoolerMinInstances) + c.logger.Warningf(msg, *numberOfInstances, constants.ConnectionPoolerMinInstances) *numberOfInstances = constants.ConnectionPoolerMinInstances } @@ -339,7 +339,7 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio Name: connectionPooler.Name, Namespace: connectionPooler.Namespace, Labels: c.connectionPoolerLabels(connectionPooler.Role, true).MatchLabels, - Annotations: map[string]string{}, + Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" // propagation policy, which means that it's deletion will not @@ -390,7 +390,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPo Name: connectionPooler.Name, Namespace: connectionPooler.Namespace, Labels: c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels, - Annotations: map[string]string{}, + Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)), // make StatefulSet object its owner to represent the dependency. // By itself StatefulSet is being deleted with "Orphaned" // propagation policy, which means that it's deletion will not @@ -866,7 +866,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql } } - newAnnotations := c.AnnotationsToPropagate(c.ConnectionPooler[role].Deployment.Annotations) + newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(c.ConnectionPooler[role].Deployment.Annotations)) if newAnnotations != nil { deployment, err = updateConnectionPoolerAnnotations(c.KubeClient, c.ConnectionPooler[role].Deployment, newAnnotations) if err != nil { diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 28d711a33..602695e0e 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1184,13 +1184,13 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration) effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) - annotations := c.generatePodAnnotations(spec) + podAnnotations := c.generatePodAnnotations(spec) // generate pod template for the statefulset, based on the spilo container and sidecars podTemplate, err = c.generatePodTemplate( c.Namespace, c.labelsSet(true), - annotations, + c.annotationsSet(podAnnotations), spiloContainer, initContainers, sidecarContainers, @@ -1236,15 +1236,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy) } - annotations = make(map[string]string) - annotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false) + stsAnnotations := make(map[string]string) + stsAnnotations[rollingUpdateStatefulsetAnnotationKey] = strconv.FormatBool(false) + stsAnnotations = c.AnnotationsToPropagate(c.annotationsSet(nil)) statefulSet := &appsv1.StatefulSet{ 
ObjectMeta: metav1.ObjectMeta{ Name: c.statefulSetName(), Namespace: c.Namespace, Labels: c.labelsSet(true), - Annotations: c.AnnotationsToPropagate(annotations), + Annotations: stsAnnotations, }, Spec: appsv1.StatefulSetSpec{ Replicas: &numberOfInstances, @@ -1537,9 +1538,10 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) username := pgUser.Name secret := v1.Secret{ ObjectMeta: metav1.ObjectMeta{ - Name: c.credentialSecretName(username), - Namespace: namespace, - Labels: c.labelsSet(true), + Name: c.credentialSecretName(username), + Namespace: namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Type: v1.SecretTypeOpaque, Data: map[string][]byte{ @@ -1613,7 +1615,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec) Name: c.serviceName(role), Namespace: c.Namespace, Labels: c.roleLabelsSet(true, role), - Annotations: c.generateServiceAnnotations(role, spec), + Annotations: c.annotationsSet(c.generateServiceAnnotations(role, spec)), }, Spec: serviceSpec, } @@ -1816,9 +1818,10 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget return &policybeta1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ - Name: c.podDisruptionBudgetName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), + Name: c.podDisruptionBudgetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Spec: policybeta1.PodDisruptionBudgetSpec{ MinAvailable: &minAvailable, @@ -1938,9 +1941,10 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) { cronJob := &batchv1beta1.CronJob{ ObjectMeta: metav1.ObjectMeta{ - Name: c.getLogicalBackupJobName(), - Namespace: c.Namespace, - Labels: c.labelsSet(true), + Name: c.getLogicalBackupJobName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), }, Spec: batchv1beta1.CronJobSpec{ Schedule: schedule, diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go index 94736b531..2781144b2 100644 --- a/pkg/cluster/sync.go +++ b/pkg/cluster/sync.go @@ -368,8 +368,8 @@ func (c *Cluster) syncStatefulSet() error { } } } - annotations := c.AnnotationsToPropagate(c.Statefulset.Annotations) - c.updateStatefulSetAnnotations(annotations) + + c.updateStatefulSetAnnotations(c.AnnotationsToPropagate(c.annotationsSet(c.Statefulset.Annotations))) if !podsRollingUpdateRequired && !c.OpConfig.EnableLazySpiloUpgrade { // even if desired and actual statefulsets match @@ -412,11 +412,15 @@ func (c *Cluster) syncStatefulSet() error { // AnnotationsToPropagate get the annotations to update if required // based on the annotations in postgres CRD func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string { - toPropagateAnnotations := c.OpConfig.DownscalerAnnotations - pgCRDAnnotations := c.Postgresql.ObjectMeta.GetAnnotations() - if toPropagateAnnotations != nil && pgCRDAnnotations != nil { - for _, anno := range toPropagateAnnotations { + if annotations == nil { + annotations = make(map[string]string) + } + + pgCRDAnnotations := c.ObjectMeta.Annotations + + if pgCRDAnnotations != nil { + for _, anno := range c.OpConfig.DownscalerAnnotations { for k, v := range pgCRDAnnotations { matched, err := regexp.MatchString(anno, k) if err != nil { @@ -430,7 +434,11 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri } } - return annotations + if len(annotations) > 0 { + return annotations + } + + return nil } // 
checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go index a2fdcb08e..d5e887656 100644 --- a/pkg/cluster/util.go +++ b/pkg/cluster/util.go @@ -271,6 +271,33 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) { return members, nil } +// Returns annotations to be passed to child objects +func (c *Cluster) annotationsSet(annotations map[string]string) map[string]string { + + if annotations == nil { + annotations = make(map[string]string) + } + + pgCRDAnnotations := c.ObjectMeta.Annotations + + // allow inheriting certain annotations from the 'postgres' object + if pgCRDAnnotations != nil { + for k, v := range pgCRDAnnotations { + for _, match := range c.OpConfig.InheritedAnnotations { + if k == match { + annotations[k] = v + } + } + } + } + + if len(annotations) > 0 { + return annotations + } + + return nil +} + func (c *Cluster) waitForPodLabel(podEvents chan PodEvent, stopChan chan struct{}, role *PostgresRole) (*v1.Pod, error) { timeout := time.After(c.OpConfig.PodLabelWaitTimeout) for { diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go new file mode 100644 index 000000000..7afc59f28 --- /dev/null +++ b/pkg/cluster/util_test.go @@ -0,0 +1,141 @@ +package cluster + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake" + "github.com/zalando/postgres-operator/pkg/util" + "github.com/zalando/postgres-operator/pkg/util/config" + "github.com/zalando/postgres-operator/pkg/util/k8sutil" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + k8sFake "k8s.io/client-go/kubernetes/fake" +) + +func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset) { + clientSet := k8sFake.NewSimpleClientset() + acidClientSet := fakeacidv1.NewSimpleClientset() + + return k8sutil.KubernetesClient{ + PodDisruptionBudgetsGetter: clientSet.PolicyV1beta1(), + ServicesGetter: clientSet.CoreV1(), + StatefulSetsGetter: clientSet.AppsV1(), + PostgresqlsGetter: acidClientSet.AcidV1(), + }, clientSet +} + +func TestInheritedAnnotations(t *testing.T) { + testName := "test inheriting annotations from manifest" + client, _ := newFakeK8sAnnotationsClient() + clusterName := "acid-test-cluster" + namespace := "default" + annotationValue := "acid" + role := Master + + pg := acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{ + Name: clusterName, + Annotations: map[string]string{ + "owned-by": annotationValue, + }, + }, + Spec: acidv1.PostgresSpec{ + EnableReplicaConnectionPooler: boolToPointer(true), + Volume: acidv1.Volume{ + Size: "1Gi", + }, + }, + } + + var cluster = New( + Config{ + OpConfig: config.Config{ + ConnectionPooler: config.ConnectionPooler{ + ConnectionPoolerDefaultCPURequest: "100m", + ConnectionPoolerDefaultCPULimit: "100m", + ConnectionPoolerDefaultMemoryRequest: "100Mi", + ConnectionPoolerDefaultMemoryLimit: "100Mi", + NumberOfInstances: int32ToPointer(1), + }, + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + InheritedAnnotations: []string{"owned-by"}, + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + cluster.Name = clusterName +
cluster.Namespace = namespace + + // test annotationsSet function + inheritedAnnotations := cluster.annotationsSet(nil) + + listOptions := metav1.ListOptions{ + LabelSelector: cluster.labelsSet(false).String(), + } + + // check statefulset annotations + _, err := cluster.createStatefulSet() + assert.NoError(t, err) + + stsList, err := client.StatefulSets(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, sts := range stsList.Items { + if !(util.MapContains(sts.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: StatefulSet %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + // pod template + if !(util.MapContains(sts.Spec.Template.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: pod template %v not inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + // pvc template + if util.MapContains(sts.Spec.VolumeClaimTemplates[0].Annotations, inheritedAnnotations) { + t.Errorf("%s: PVC template %v not expected to have inherited annotations %#v, got %#v", testName, sts.ObjectMeta.Name, inheritedAnnotations, sts.ObjectMeta.Annotations) + } + } + + // check service annotations + cluster.createService(Master) + svcList, err := client.Services(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, svc := range svcList.Items { + if !(util.MapContains(svc.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Service %v not inherited annotations %#v, got %#v", testName, svc.ObjectMeta.Name, inheritedAnnotations, svc.ObjectMeta.Annotations) + } + } + + // check pod disruption budget annotations + cluster.createPodDisruptionBudget() + pdbList, err := client.PodDisruptionBudgets(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + for _, pdb := range pdbList.Items { + if !(util.MapContains(pdb.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Pod Disruption Budget %v not inherited annotations %#v, got %#v", testName, pdb.ObjectMeta.Name, inheritedAnnotations, pdb.ObjectMeta.Annotations) + } + } + + // check pooler deployment annotations + cluster.ConnectionPooler = map[PostgresRole]*ConnectionPoolerObjects{} + cluster.ConnectionPooler[role] = &ConnectionPoolerObjects{ + Name: cluster.connectionPoolerName(role), + ClusterName: cluster.ClusterName, + Namespace: cluster.Namespace, + Role: role, + } + deploy, err := cluster.generateConnectionPoolerDeployment(cluster.ConnectionPooler[role]) + assert.NoError(t, err) + + if !(util.MapContains(deploy.ObjectMeta.Annotations, inheritedAnnotations)) { + t.Errorf("%s: Deployment %v not inherited annotations %#v, got %#v", testName, deploy.ObjectMeta.Name, inheritedAnnotations, deploy.ObjectMeta.Annotations) + } + +} diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go index 907b9959f..4288cdfc4 100644 --- a/pkg/cluster/volumes_test.go +++ b/pkg/cluster/volumes_test.go @@ -22,7 +22,7 @@ import ( "k8s.io/client-go/kubernetes/fake" ) -func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) { +func newFakeK8sPVCclient() (k8sutil.KubernetesClient, *fake.Clientset) { clientSet := fake.NewSimpleClientset() return k8sutil.KubernetesClient{ @@ -34,7 +34,7 @@ func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) { func TestResizeVolumeClaim(t *testing.T) { testName := "test resizing of persistent volume claims" - client, _ := NewFakeKubernetesClient() + 
client, _ := newFakeK8sPVCclient() clusterName := "acid-test-cluster" namespace := "default" newVolumeSize := "2Gi" diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index 20fb0f0dc..f5b8b3b51 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -15,7 +15,7 @@ import ( func (c *Controller) readOperatorConfigurationFromCRD(configObjectNamespace, configObjectName string) (*acidv1.OperatorConfiguration, error) { - config, err := c.KubeClient.AcidV1ClientSet.AcidV1().OperatorConfigurations(configObjectNamespace).Get( + config, err := c.KubeClient.OperatorConfigurationsGetter.OperatorConfigurations(configObjectNamespace).Get( context.TODO(), configObjectName, metav1.GetOptions{}) if err != nil { return nil, fmt.Errorf("could not get operator configuration object %q: %v", configObjectName, err) @@ -93,6 +93,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.PodRoleLabel = util.Coalesce(fromCRD.Kubernetes.PodRoleLabel, "spilo-role") result.ClusterLabels = util.CoalesceStrMap(fromCRD.Kubernetes.ClusterLabels, map[string]string{"application": "spilo"}) result.InheritedLabels = fromCRD.Kubernetes.InheritedLabels + result.InheritedAnnotations = fromCRD.Kubernetes.InheritedAnnotations result.DownscalerAnnotations = fromCRD.Kubernetes.DownscalerAnnotations result.ClusterNameLabel = util.Coalesce(fromCRD.Kubernetes.ClusterNameLabel, "cluster-name") result.DeleteAnnotationDateKey = fromCRD.Kubernetes.DeleteAnnotationDateKey diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go index 4b5d68fe5..0fe0c1120 100644 --- a/pkg/controller/postgresql.go +++ b/pkg/controller/postgresql.go @@ -46,7 +46,7 @@ func (c *Controller) listClusters(options metav1.ListOptions) (*acidv1.Postgresq var pgList acidv1.PostgresqlList // TODO: use the SharedInformer cache instead of quering Kubernetes API directly. 
- list, err := c.KubeClient.AcidV1ClientSet.AcidV1().Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options) + list, err := c.KubeClient.PostgresqlsGetter.Postgresqls(c.opConfig.WatchedNamespace).List(context.TODO(), options) if err != nil { c.logger.Errorf("could not list postgresql objects: %v", err) } diff --git a/pkg/controller/util.go b/pkg/controller/util.go index 7f87de97d..815bc7b74 100644 --- a/pkg/controller/util.go +++ b/pkg/controller/util.go @@ -398,7 +398,7 @@ func (c *Controller) loadPostgresTeams() { // reset team map c.pgTeamMap = teams.PostgresTeamMap{} - pgTeams, err := c.KubeClient.AcidV1ClientSet.AcidV1().PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{}) + pgTeams, err := c.KubeClient.PostgresTeamsGetter.PostgresTeams(c.opConfig.WatchedNamespace).List(context.TODO(), metav1.ListOptions{}) if err != nil { c.logger.Errorf("could not list postgres team objects: %v", err) } diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 122c192a5..0b2941683 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -36,6 +36,7 @@ type Resources struct { SpiloPrivileged bool `name:"spilo_privileged" default:"false"` ClusterLabels map[string]string `name:"cluster_labels" default:"application:spilo"` InheritedLabels []string `name:"inherited_labels" default:""` + InheritedAnnotations []string `name:"inherited_annotations" default:""` DownscalerAnnotations []string `name:"downscaler_annotations"` ClusterNameLabel string `name:"cluster_name_label" default:"cluster-name"` DeleteAnnotationDateKey string `name:"delete_annotation_date_key"` diff --git a/pkg/util/k8sutil/k8sutil.go b/pkg/util/k8sutil/k8sutil.go index 19f95d9f1..a23c1f842 100644 --- a/pkg/util/k8sutil/k8sutil.go +++ b/pkg/util/k8sutil/k8sutil.go @@ -11,7 +11,9 @@ import ( batchv1beta1 "k8s.io/api/batch/v1beta1" clientbatchv1beta1 "k8s.io/client-go/kubernetes/typed/batch/v1beta1" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + apiacidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" + acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" + acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" apiappsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" @@ -19,6 +21,7 @@ import ( apiextclient "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset" apiextv1 "k8s.io/apiextensions-apiserver/pkg/client/clientset/clientset/typed/apiextensions/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/kubernetes" appsv1 "k8s.io/client-go/kubernetes/typed/apps/v1" @@ -27,9 +30,6 @@ import ( rbacv1 "k8s.io/client-go/kubernetes/typed/rbac/v1" "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" - - acidv1client "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned" - metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) func Int32ToPointer(value int32) *int32 { @@ -55,6 +55,9 @@ type KubernetesClient struct { policyv1beta1.PodDisruptionBudgetsGetter apiextv1.CustomResourceDefinitionsGetter clientbatchv1beta1.CronJobsGetter + acidv1.OperatorConfigurationsGetter + acidv1.PostgresTeamsGetter + acidv1.PostgresqlsGetter RESTClient rest.Interface AcidV1ClientSet *acidv1client.Clientset @@ -154,15 +157,23 @@ func NewFromConfig(cfg *rest.Config) (KubernetesClient, error) { } 
kubeClient.CustomResourceDefinitionsGetter = apiextClient.ApiextensionsV1() + kubeClient.AcidV1ClientSet, err = acidv1client.NewForConfig(cfg) + if err != nil { + return kubeClient, fmt.Errorf("could not create acid.zalan.do clientset: %v", err) + } + + kubeClient.OperatorConfigurationsGetter = kubeClient.AcidV1ClientSet.AcidV1() + kubeClient.PostgresTeamsGetter = kubeClient.AcidV1ClientSet.AcidV1() + kubeClient.PostgresqlsGetter = kubeClient.AcidV1ClientSet.AcidV1() return kubeClient, nil } // SetPostgresCRDStatus of Postgres cluster -func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*acidv1.Postgresql, error) { - var pg *acidv1.Postgresql - var pgStatus acidv1.PostgresStatus +func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.NamespacedName, status string) (*apiacidv1.Postgresql, error) { + var pg *apiacidv1.Postgresql + var pgStatus apiacidv1.PostgresStatus pgStatus.PostgresClusterStatus = status patch, err := json.Marshal(struct { @@ -176,7 +187,7 @@ func (client *KubernetesClient) SetPostgresCRDStatus(clusterName spec.Namespaced // we cannot do a full scale update here without fetching the previous manifest (as the resourceVersion may differ), // however, we could do patch without it. In the future, once /status subresource is there (starting Kubernetes 1.11) // we should take advantage of it. - pg, err = client.AcidV1ClientSet.AcidV1().Postgresqls(clusterName.Namespace).Patch( + pg, err = client.PostgresqlsGetter.Postgresqls(clusterName.Namespace).Patch( context.TODO(), clusterName.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "status") if err != nil { return pg, fmt.Errorf("could not update status: %v", err)
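
To see the new option end to end, a minimal usage sketch (the annotation key `owned-by` and the value `acid` simply mirror the examples and test fixtures above; the resource names and the abridged manifest fields are illustrative, not mandated by this patch): the operator configuration lists the annotation keys to inherit, the cluster manifest carries the annotation, and the operator then copies it onto the child objects it creates (StatefulSet and its pod template, Services, PodDisruptionBudget, Secrets, the logical backup CronJob, and the connection pooler Deployment and Service):

# ConfigMap-based operator configuration (excerpt)
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  inherited_annotations: owned-by
---
# Postgres cluster manifest carrying the annotation to be inherited (abridged)
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster
  annotations:
    owned-by: acid
spec:
  teamId: acid
  numberOfInstances: 2
  volume:
    size: 1Gi

After the operator syncs the cluster, `kubectl get statefulset acid-test-cluster -o jsonpath='{.metadata.annotations.owned-by}'` should print `acid`, which is what the new `TestInheritedAnnotations` unit test and the extended `test_statefulset_annotation_propagation` e2e test assert.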