diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py
index e47b4bf4c..44c72cb44 100644
--- a/e2e/tests/test_e2e.py
+++ b/e2e/tests/test_e2e.py
@@ -208,13 +208,12 @@ def test_additional_pod_capabilities(self):
 
         try:
             k8s.update_config(patch_capabilities)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
-                                 "Operator does not get in sync")
 
             # changed security context of postgres container should trigger a rolling update
             k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
 
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
             self.eventuallyEqual(lambda: k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
                                  2, "Container capabilities not updated")
 
@@ -240,8 +239,6 @@ def test_additional_teams_and_members(self):
             },
         }
         k8s.update_config(enable_postgres_team_crd)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
-                             "Operator does not get in sync")
 
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -366,7 +363,7 @@ def test_config_update(self):
 
         try:
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)
-
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             def compare_config():
@@ -483,7 +480,7 @@ def test_cross_namespace_secrets(self):
                 }
             }
         })
-
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         self.eventuallyEqual(lambda: k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
@@ -1002,12 +999,12 @@ def test_min_resource_limits(self):
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+                             "Operator does not get in sync")
 
         # wait for switched over
         k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
-        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")
 
         def verify_pod_limits():
             pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
@@ -1109,7 +1106,8 @@ def test_node_affinity(self):
                 plural="postgresqls",
                 name="acid-minimal-cluster",
                 body=patch_node_affinity_config)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
+                                 "Operator does not get in sync")
 
             # node affinity change should cause replica to relocate from replica node to master node due to node affinity requirement
             k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go
index 80c59f872..9c1aada79 100644
--- a/pkg/cluster/cluster.go
+++ b/pkg/cluster/cluster.go
@@ -683,6 +683,10 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
 		err error
 	)
 
+	if spec.Resources == nil {
+		return nil
+	}
+
 	// setting limits too low can cause unnecessary evictions / OOM kills
 	minCPULimit := c.OpConfig.MinCPULimit
 	minMemoryLimit := c.OpConfig.MinMemoryLimit
diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go
index 20c1c5c93..401b1bc94 100644
--- a/pkg/cluster/cluster_test.go
+++ b/pkg/cluster/cluster_test.go
@@ -11,6 +11,7 @@ import (
 	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
 	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
 	"github.com/zalando/postgres-operator/pkg/spec"
+	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/config"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
@@ -167,8 +168,17 @@ func TestInitAdditionalOwnerRoles(t *testing.T) {
 	}
 
 	cl.initAdditionalOwnerRoles()
-	if !reflect.DeepEqual(cl.pgUsers, expectedUsers) {
-		t.Errorf("%s expected: %#v, got %#v", testName, expectedUsers, cl.pgUsers)
+
+	for _, additionalOwnerRole := range cl.Config.OpConfig.AdditionalOwnerRoles {
+		expectedPgUser := expectedUsers[additionalOwnerRole]
+		existingPgUser, exists := cl.pgUsers[additionalOwnerRole]
+		if !exists {
+			t.Errorf("%s additional owner role %q not initialized", testName, additionalOwnerRole)
+		}
+		if !util.IsEqualIgnoreOrder(expectedPgUser.MemberOf, existingPgUser.MemberOf) {
+			t.Errorf("%s unexpected membership of additional owner role %q: expected member of %#v, got member of %#v",
+				testName, additionalOwnerRole, expectedPgUser.MemberOf, existingPgUser.MemberOf)
+		}
 	}
 }
 
diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go
index 7e96adacc..23f3b9cd6 100644
--- a/pkg/cluster/k8sres.go
+++ b/pkg/cluster/k8sres.go
@@ -139,8 +139,8 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
 
 func generateResourceRequirements(resources *acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
 	var err error
-	var specRequests acidv1.ResourceDescription
-	var specLimits acidv1.ResourceDescription
+	var specRequests, specLimits acidv1.ResourceDescription
+
 	if resources == nil {
 		specRequests = acidv1.ResourceDescription{}
 		specLimits = acidv1.ResourceDescription{}
@@ -1007,14 +1007,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 
 	// controller adjusts the default memory request at operator startup
 
-	request := spec.Resources.ResourceRequests.Memory
-	if request == "" {
-		request = c.OpConfig.Resources.DefaultMemoryRequest
-	}
+	var request, limit string
 
-	limit := spec.Resources.ResourceLimits.Memory
-	if limit == "" {
+	if spec.Resources == nil {
+		request = c.OpConfig.Resources.DefaultMemoryRequest
 		limit = c.OpConfig.Resources.DefaultMemoryLimit
+	} else {
+		request = spec.Resources.ResourceRequests.Memory
+		limit = spec.Resources.ResourceLimits.Memory
 	}
 
 	isSmaller, err := util.IsSmallerQuantity(request, limit)
@@ -1024,7 +1024,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	if isSmaller {
 		c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
 		spec.Resources.ResourceRequests.Memory = limit
-
 	}
 
 	// controller adjusts the Scalyr sidecar request at operator startup
@@ -1034,14 +1033,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 
 	for _, sidecar := range spec.Sidecars {
 		// TODO #413
-		sidecarRequest := sidecar.Resources.ResourceRequests.Memory
-		if request == "" {
-			request = c.OpConfig.Resources.DefaultMemoryRequest
-		}
+		var sidecarRequest, sidecarLimit string
 
-		sidecarLimit := sidecar.Resources.ResourceLimits.Memory
-		if limit == "" {
-			limit = c.OpConfig.Resources.DefaultMemoryLimit
+		if sidecar.Resources == nil {
+			sidecarRequest = c.OpConfig.Resources.DefaultMemoryRequest
+			sidecarLimit = c.OpConfig.Resources.DefaultMemoryLimit
+		} else {
+			sidecarRequest = sidecar.Resources.ResourceRequests.Memory
+			sidecarLimit = sidecar.Resources.ResourceLimits.Memory
 		}
 
 		isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit)
@@ -1057,7 +1056,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	}
 
 	defaultResources := c.makeDefaultResources()
-
 	resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
 	if err != nil {
 		return nil, fmt.Errorf("could not generate resource requirements: %v", err)
diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go
index daa8aa4e4..85305504f 100644
--- a/pkg/cluster/k8sres_test.go
+++ b/pkg/cluster/k8sres_test.go
@@ -1578,7 +1578,7 @@ func TestEnableLoadBalancers(t *testing.T) {
 				EnableReplicaLoadBalancer:       util.False(),
 				EnableReplicaPoolerLoadBalancer: util.False(),
 				NumberOfInstances:               1,
-				Resources: acidv1.Resources{
+				Resources: &acidv1.Resources{
 					ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 					ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 				},
@@ -1625,7 +1625,7 @@ func TestEnableLoadBalancers(t *testing.T) {
 				EnableReplicaLoadBalancer:       util.True(),
 				EnableReplicaPoolerLoadBalancer: util.True(),
 				NumberOfInstances:               1,
-				Resources: acidv1.Resources{
+				Resources: &acidv1.Resources{
 					ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 					ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 				},
@@ -1720,7 +1720,7 @@ func TestVolumeSelector(t *testing.T) {
 		return acidv1.PostgresSpec{
 			TeamID:            "myapp",
 			NumberOfInstances: 0,
-			Resources: acidv1.Resources{
+			Resources: &acidv1.Resources{
 				ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 				ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
 			},