16 changes: 7 additions & 9 deletions e2e/tests/test_e2e.py
@@ -208,13 +208,12 @@ def test_additional_pod_capabilities(self):

try:
k8s.update_config(patch_capabilities)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

# changed security context of postgres container should trigger a rolling update
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_pods_with_container_capabilities(capabilities, cluster_label),
2, "Container capabilities not updated")

@@ -240,8 +239,6 @@ def test_additional_teams_and_members(self):
},
}
k8s.update_config(enable_postgres_team_crd)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',
@@ -366,7 +363,7 @@ def test_config_update(self):
try:
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

def compare_config():
@@ -483,7 +480,7 @@ def test_cross_namespace_secrets(self):
}
}
})

self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
@@ -1002,12 +999,12 @@ def test_min_resource_limits(self):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

# wait for switched over
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")

def verify_pod_limits():
pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
@@ -1109,7 +1106,8 @@ def test_node_affinity(self):
plural="postgresqls",
name="acid-minimal-cluster",
body=patch_node_affinity_config)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")

# node affinity change should cause replica to relocate from replica node to master node due to node affinity requirement
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
4 changes: 4 additions & 0 deletions pkg/cluster/cluster.go
@@ -683,6 +683,10 @@ func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
err error
)

if spec.Resources == nil {
return nil
}

// setting limits too low can cause unnecessary evictions / OOM kills
minCPULimit := c.OpConfig.MinCPULimit
minMemoryLimit := c.OpConfig.MinMemoryLimit
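
An illustrative sketch, not part of the change itself: the early return added here is the pattern this PR relies on wherever spec.Resources is read. Because the field is now a pointer, a manifest that omits the resources section yields nil, and every dereference needs a guard. The hasExplicitLimits helper below is hypothetical and only demonstrates that guard:

package examples

import (
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

// hasExplicitLimits is a hypothetical helper: it checks for a nil resources
// section before reading any of its fields.
func hasExplicitLimits(spec *acidv1.PostgresSpec) bool {
	if spec.Resources == nil {
		return false // resources omitted in the manifest; operator defaults apply
	}
	return spec.Resources.ResourceLimits.CPU != "" || spec.Resources.ResourceLimits.Memory != ""
}
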
14 changes: 12 additions & 2 deletions pkg/cluster/cluster_test.go
@@ -11,6 +11,7 @@ import (
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
@@ -167,8 +168,17 @@ func TestInitAdditionalOwnerRoles(t *testing.T) {
}

cl.initAdditionalOwnerRoles()
if !reflect.DeepEqual(cl.pgUsers, expectedUsers) {
t.Errorf("%s expected: %#v, got %#v", testName, expectedUsers, cl.pgUsers)

for _, additionalOwnerRole := range cl.Config.OpConfig.AdditionalOwnerRoles {
expectedPgUser := expectedUsers[additionalOwnerRole]
existingPgUser, exists := cl.pgUsers[additionalOwnerRole]
if !exists {
t.Errorf("%s additional owner role %q not initilaized", testName, additionalOwnerRole)
}
if !util.IsEqualIgnoreOrder(expectedPgUser.MemberOf, existingPgUser.MemberOf) {
t.Errorf("%s unexpected membership of additional owner role %q: expected member of %#v, got member of %#v",
testName, additionalOwnerRole, expectedPgUser.MemberOf, existingPgUser.MemberOf)
}
}
}

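
The rewritten assertion checks each configured additional owner role on its own and compares its MemberOf list with util.IsEqualIgnoreOrder, presumably because the order in which memberships are collected is not stable, which made the previous reflect.DeepEqual over the whole pgUsers map brittle. An illustrative sketch of an order-insensitive comparison of this kind follows; the operator's util.IsEqualIgnoreOrder may differ in detail:

package examples

import (
	"reflect"
	"sort"
)

// isEqualIgnoreOrder reports whether two string slices contain the same
// elements regardless of order; both slices are copied so the caller's
// data stays untouched.
func isEqualIgnoreOrder(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	ac := append([]string(nil), a...)
	bc := append([]string(nil), b...)
	sort.Strings(ac)
	sort.Strings(bc)
	return reflect.DeepEqual(ac, bc)
}
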
32 changes: 15 additions & 17 deletions pkg/cluster/k8sres.go
@@ -139,8 +139,8 @@ func (c *Cluster) makeDefaultResources() acidv1.Resources {
func generateResourceRequirements(resources *acidv1.Resources, defaultResources acidv1.Resources) (*v1.ResourceRequirements, error) {
var err error

var specRequests acidv1.ResourceDescription
var specLimits acidv1.ResourceDescription
var specRequests, specLimits acidv1.ResourceDescription

if resources == nil {
specRequests = acidv1.ResourceDescription{}
specLimits = acidv1.ResourceDescription{}
@@ -1007,14 +1007,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef

// controller adjusts the default memory request at operator startup

request := spec.Resources.ResourceRequests.Memory
if request == "" {
request = c.OpConfig.Resources.DefaultMemoryRequest
}
var request, limit string

limit := spec.Resources.ResourceLimits.Memory
if limit == "" {
if spec.Resources == nil {
request = c.OpConfig.Resources.DefaultMemoryRequest
limit = c.OpConfig.Resources.DefaultMemoryLimit
} else {
request = spec.Resources.ResourceRequests.Memory
limit = spec.Resources.ResourceLimits.Memory
}

isSmaller, err := util.IsSmallerQuantity(request, limit)
@@ -1024,7 +1024,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
if isSmaller {
c.logger.Warningf("The memory request of %v for the Postgres container is increased to match the memory limit of %v.", request, limit)
spec.Resources.ResourceRequests.Memory = limit

}

// controller adjusts the Scalyr sidecar request at operator startup
@@ -1034,14 +1033,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
for _, sidecar := range spec.Sidecars {

// TODO #413
sidecarRequest := sidecar.Resources.ResourceRequests.Memory
if request == "" {
request = c.OpConfig.Resources.DefaultMemoryRequest
}
var sidecarRequest, sidecarLimit string

sidecarLimit := sidecar.Resources.ResourceLimits.Memory
if limit == "" {
limit = c.OpConfig.Resources.DefaultMemoryLimit
if sidecar.Resources == nil {
sidecarRequest = c.OpConfig.Resources.DefaultMemoryRequest
sidecarLimit = c.OpConfig.Resources.DefaultMemoryLimit
} else {
sidecarRequest = sidecar.Resources.ResourceRequests.Memory
sidecarLimit = sidecar.Resources.ResourceLimits.Memory
}

isSmaller, err := util.IsSmallerQuantity(sidecarRequest, sidecarLimit)
@@ -1057,7 +1056,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
}

defaultResources := c.makeDefaultResources()

resourceRequirements, err := generateResourceRequirements(spec.Resources, defaultResources)
if err != nil {
return nil, fmt.Errorf("could not generate resource requirements: %v", err)
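
The memory handling in this file now follows one pattern: take request and limit from the manifest when a resources section is present, fall back to the operator defaults when it is nil, and raise the request to the limit if it turns out smaller (the option that gates this adjustment sits outside the shown context). Below is a condensed, self-contained sketch of that resolution; it uses apimachinery's resource.Quantity for the comparison instead of util.IsSmallerQuantity, and it deliberately omits the write-back into spec.Resources, which would itself need a nil guard:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// memorySpec stands in for the request/limit pair of a manifest's resources
// section; a nil *memorySpec means the section was omitted.
type memorySpec struct {
	Request string
	Limit   string
}

// resolveMemory picks the manifest values when present, the operator defaults
// otherwise, and bumps the request up to the limit so request <= limit holds.
func resolveMemory(spec *memorySpec, defaultRequest, defaultLimit string) (string, string) {
	request, limit := defaultRequest, defaultLimit
	if spec != nil {
		request, limit = spec.Request, spec.Limit
	}
	// MustParse panics on malformed or empty quantities; the operator's
	// util.IsSmallerQuantity reports an error instead.
	req := resource.MustParse(request)
	lim := resource.MustParse(limit)
	if req.Cmp(lim) < 0 {
		request = limit
	}
	return request, limit
}

func main() {
	fmt.Println(resolveMemory(nil, "100Mi", "500Mi"))                                         // 500Mi 500Mi
	fmt.Println(resolveMemory(&memorySpec{Request: "250Mi", Limit: "1Gi"}, "100Mi", "500Mi")) // 1Gi 1Gi
}
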
6 changes: 3 additions & 3 deletions pkg/cluster/k8sres_test.go
@@ -1578,7 +1578,7 @@ func TestEnableLoadBalancers(t *testing.T) {
EnableReplicaLoadBalancer: util.False(),
EnableReplicaPoolerLoadBalancer: util.False(),
NumberOfInstances: 1,
Resources: acidv1.Resources{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
@@ -1625,7 +1625,7 @@ func TestEnableLoadBalancers(t *testing.T) {
EnableReplicaLoadBalancer: util.True(),
EnableReplicaPoolerLoadBalancer: util.True(),
NumberOfInstances: 1,
Resources: acidv1.Resources{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
@@ -1720,7 +1720,7 @@ func TestVolumeSelector(t *testing.T) {
return acidv1.PostgresSpec{
TeamID: "myapp",
NumberOfInstances: 0,
Resources: acidv1.Resources{
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
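
The fixture changes above are purely mechanical: with the spec field now of type *acidv1.Resources, tests take the address of the struct literal, and they can drop the field entirely to exercise the defaulting path. A short illustration with arbitrary values:

package examples

import (
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
)

// specWithResources builds a spec with an explicit resources section by
// taking the address of the struct literal.
func specWithResources() acidv1.PostgresSpec {
	return acidv1.PostgresSpec{
		TeamID:            "myapp",
		NumberOfInstances: 1,
		Resources: &acidv1.Resources{
			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
		},
	}
}

// specWithoutResources omits the section; Resources stays nil and the
// operator-level defaults apply.
func specWithoutResources() acidv1.PostgresSpec {
	return acidv1.PostgresSpec{
		TeamID:            "myapp",
		NumberOfInstances: 1,
	}
}
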