diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml
index 60a0a1f04..78624e0bd 100644
--- a/charts/postgres-operator/values.yaml
+++ b/charts/postgres-operator/values.yaml
@@ -55,7 +55,7 @@ configKubernetes:
   # additional labels assigned to the cluster objects
   cluster_labels: application:spilo
   # label assigned to Kubernetes objects created by the operator
-  cluster_name_label: version
+  cluster_name_label: cluster-name
   # annotations attached to each database pod
   # custom_pod_annotations: "keya:valuea,keyb:valueb"
 
diff --git a/docs/user.md b/docs/user.md
index f81e11ede..e1baf9ad1 100644
--- a/docs/user.md
+++ b/docs/user.md
@@ -65,7 +65,7 @@ our test cluster.
 
 ```bash
 # get name of master pod of acid-minimal-cluster
-export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,version=acid-minimal-cluster,spilo-role=master)
+export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master)
 
 # set up port forward
 kubectl port-forward $PGMASTER 6432:5432
diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py
index 2d81a0647..12106601e 100644
--- a/e2e/tests/test_e2e.py
+++ b/e2e/tests/test_e2e.py
@@ -65,7 +65,7 @@ def test_enable_load_balancer(self):
         '''
 
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'
 
         # enable load balancer services
         pg_patch_enable_lbs = {
@@ -113,7 +113,7 @@ def test_min_resource_limits(self):
         Lower resource limits below configured minimum and let operator fix it
         '''
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'
         _, failover_targets = k8s.get_pg_nodes(cluster_label)
 
         # configure minimum boundaries for CPU and memory limits
@@ -172,7 +172,7 @@ def test_multi_namespace_support(self):
 
         k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
         k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-        self.assert_master_is_unique(self.namespace, version="acid-test-cluster")
+        self.assert_master_is_unique(self.namespace, "acid-test-cluster")
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_scaling(self):
@@ -180,7 +180,7 @@ def test_scaling(self):
         Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
         '''
         k8s = self.k8s
-        labels = "version=acid-minimal-cluster"
+        labels = "cluster-name=acid-minimal-cluster"
 
         k8s.wait_for_pg_to_scale(3)
         self.assertEqual(3, k8s.count_pods_with_label(labels))
@@ -196,7 +196,7 @@ def test_taint_based_eviction(self):
         Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
         '''
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'
 
         # get nodes of master and replica(s) (expected target of new master)
         current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label)
@@ -334,9 +334,9 @@ def test_service_annotations(self):
             "foo": "bar",
         }
         self.assertTrue(k8s.check_service_annotations(
-            "version=acid-service-annotations,spilo-role=master", annotations))
+            "cluster-name=acid-service-annotations,spilo-role=master", annotations))
         self.assertTrue(k8s.check_service_annotations(
-            "version=acid-service-annotations,spilo-role=replica", annotations))
+            "cluster-name=acid-service-annotations,spilo-role=replica", annotations))
 
         # clean up
         unpatch_custom_service_annotations = {
@@ -346,14 +346,14 @@ def test_service_annotations(self):
         }
         k8s.update_config(unpatch_custom_service_annotations)
 
-    def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"):
+    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
         '''
         Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
         To be called manually after operations that affect pods
         '''
 
         k8s = self.k8s
-        labels = 'spilo-role=master,version=' + version
+        labels = 'spilo-role=master,cluster-name=' + clusterName
 
         num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
         self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
@@ -438,7 +438,7 @@ def wait_for_pg_to_scale(self, number_of_instances, namespace='default'):
         _ = self.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)
 
-        labels = 'version=acid-minimal-cluster'
+        labels = 'cluster-name=acid-minimal-cluster'
         while self.count_pods_with_label(labels) != number_of_instances:
             time.sleep(self.RETRY_TIMEOUT_SEC)
 
@@ -448,7 +448,7 @@ def count_pods_with_label(self, labels, namespace='default'):
     def wait_for_master_failover(self, expected_master_nodes, namespace='default'):
         pod_phase = 'Failing over'
         new_master_node = ''
-        labels = 'spilo-role=master,version=acid-minimal-cluster'
+        labels = 'spilo-role=master,cluster-name=acid-minimal-cluster'
 
         while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes):
             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml
index 6b00c30f4..4289a134c 100644
--- a/manifests/configmap.yaml
+++ b/manifests/configmap.yaml
@@ -10,7 +10,7 @@ data:
   cluster_domain: cluster.local
   cluster_history_entries: "1000"
   cluster_labels: application:spilo
-  cluster_name_label: version
+  cluster_name_label: cluster-name
   # custom_service_annotations: "keyx:valuez,keya:valuea"
   # custom_pod_annotations: "keya:valuea,keyb:valueb"
   db_hosted_zone: db.example.com
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index e6561e0f3..4468c8428 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -1498,8 +1498,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 	)
 
 	labels := map[string]string{
-		"version":     c.Name,
-		"application": "spilo-logical-backup",
+		c.OpConfig.ClusterNameLabel: c.Name,
+		"application":               "spilo-logical-backup",
 	}
 	podAffinityTerm := v1.PodAffinityTerm{
 		LabelSelector: &metav1.LabelSelector{