14 changes: 14 additions & 0 deletions e2e/tests/k8s_api.py
@@ -252,6 +252,13 @@ def exec_with_kubectl(self, pod, cmd):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)

def patroni_rest(self, pod, path):
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None

return json.loads(r.stdout.decode())

def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
@@ -514,6 +521,13 @@ def exec_with_kubectl(self, pod, cmd):
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)

def patroni_rest(self, pod, path):
Member: duplicate of 246-252 ?

Member Author: yes, there's a lot of duplicate code. I don't understand why. I thought about doing it properly, too 😄
Someone should tidy this part up.

r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None

return json.loads(r.stdout.decode())

def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
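For context, a minimal usage sketch of the new helper (not part of the PR; the pod name and the `k8s` wrapper instance are assumptions): it shells into the pod, queries Patroni's REST API on port 8008, and parses the JSON reply.

    # Illustrative usage sketch; pod name and `k8s` instance are assumed.
    config = k8s.patroni_rest("acid-minimal-cluster-0", "config")
    if config is not None:
        # Inspect the effective dynamic configuration.
        print(config["ttl"], config["postgresql"]["parameters"]["max_connections"])

And regarding the duplication the reviewer points out, one possible tidy-up would be a shared mixin for both API classes. This is only a sketch, not a change made by this PR; the kubectl invocation is an assumption, since the diff truncates the original exec_with_kubectl body.

    # Hypothetical refactor sketch addressing the duplication noted above.
    import json
    import subprocess

    class KubectlExecMixin:
        def exec_with_kubectl(self, pod, cmd):
            # Run a shell command inside the pod and capture its output.
            # The exact kubectl invocation is assumed here.
            return subprocess.run(
                ["kubectl", "exec", pod, "--", "sh", "-c", cmd],
                stdout=subprocess.PIPE,
                stderr=subprocess.PIPE)

        def patroni_rest(self, pod, path):
            # Query Patroni's REST API (port 8008) and parse the JSON reply.
            r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
            if r.returncode != 0 or not r.stdout.decode().startswith("{"):
                return None
            return json.loads(r.stdout.decode())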
141 changes: 82 additions & 59 deletions e2e/tests/test_e2e.py
@@ -324,65 +324,6 @@ def test_cross_namespace_secrets(self):
self.eventuallyEqual(lambda: self.k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
1, "Secret not created for user in namespace")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_decrease_max_connections(self):
'''
Test decreasing max_connections and restarting the cluster through the REST API
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
labels = 'spilo-role=master,' + cluster_label
new_max_connections_value = "99"
pods = k8s.api.core_v1.list_namespaced_pod(
'default', label_selector=labels).items
self.assert_master_is_unique()
masterPod = pods[0]
creationTimestamp = masterPod.metadata.creation_timestamp

# adjust max_connections
pg_patch_max_connections = {
"spec": {
"postgresql": {
"parameters": {
"max_connections": new_max_connections_value
}
}
}
}

try:
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_max_connections)

def get_max_connections():
pods = k8s.api.core_v1.list_namespaced_pod(
'default', label_selector=labels).items
self.assert_master_is_unique()
masterPod = pods[0]
get_max_connections_cmd = '''psql -At -U postgres -c "SELECT setting FROM pg_settings WHERE name = 'max_connections';"'''
result = k8s.exec_with_kubectl(masterPod.metadata.name, get_max_connections_cmd)
max_connections_value = int(result.stdout)
return max_connections_value

# Make sure that max_connections decreased
self.eventuallyEqual(get_max_connections, int(new_max_connections_value), "max_connections didn't decrease")
pods = k8s.api.core_v1.list_namespaced_pod(
'default', label_selector=labels).items
self.assert_master_is_unique()
masterPod = pods[0]
# Make sure that the pod didn't restart
self.assertEqual(creationTimestamp, masterPod.metadata.creation_timestamp,
"Master pod creation timestamp is updated")

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

# make sure cluster is in a good state for further tests
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2,
"No 2 pods running")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_enable_disable_connection_pooler(self):
'''
@@ -1114,6 +1055,88 @@ def test_overwrite_pooler_deployment(self):
self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
0, "Pooler pods not scaled down")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_patroni_config_update(self):
'''
Change the Postgres config under Spec.Postgresql.Parameters and Spec.Patroni
and query Patroni's config endpoint to check that the manifest changes were
applied via a restart through Patroni's REST API
'''
k8s = self.k8s
masterPod = k8s.get_cluster_leader_pod()
labels = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master'
creationTimestamp = masterPod.metadata.creation_timestamp
new_max_connections_value = "50"

# adjust max_connections and a few Patroni settings
pg_patch_config = {
"spec": {
"postgresql": {
"parameters": {
"max_connections": new_max_connections_value
}
},
"patroni": {
"slots": {
"test_slot": {
"type": "physical"
}
},
"ttl": 29,
"loop_wait": 9,
"retry_timeout": 9,
"synchronous_mode": True
}
}
}

try:
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)

self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

def compare_config():
effective_config = k8s.patroni_rest(masterPod.metadata.name, "config")
desired_patroni = pg_patch_config["spec"]["patroni"]
desired_parameters = pg_patch_config["spec"]["postgresql"]["parameters"]
effective_parameters = effective_config["postgresql"]["parameters"]
self.assertEqual(desired_parameters["max_connections"], effective_parameters["max_connections"],
"max_connections not updated")
self.assertTrue(effective_config["slots"] is not None, "physical replication slot not added")
self.assertEqual(desired_patroni["ttl"], effective_config["ttl"],
"ttl not updated")
self.assertEqual(desired_patroni["loop_wait"], effective_config["loop_wait"],
"loop_wait not updated")
self.assertEqual(desired_patroni["retry_timeout"], effective_config["retry_timeout"],
"retry_timeout not updated")
self.assertEqual(desired_patroni["synchronous_mode"], effective_config["synchronous_mode"],
"synchronous_mode not updated")
return True

self.eventuallyTrue(compare_config, "Postgres config not applied")

setting_query = """
SELECT setting
FROM pg_settings
WHERE name = 'max_connections';
"""
self.eventuallyEqual(lambda: self.query_database(masterPod.metadata.name, "postgres", setting_query)[0], new_max_connections_value,
"New max_connections setting not applied", 10, 5)

# make sure that pod wasn't recreated
self.assertEqual(creationTimestamp, masterPod.metadata.creation_timestamp,
"Master pod creation timestamp is updated")

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

# make sure cluster is in a good state for further tests
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2,
"No 2 pods running")

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_rolling_update_flag(self):
'''
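For reference, a sketch (not taken from the PR) of what the new test asserts against: Patroni's GET /config endpoint returns the cluster's dynamic configuration as JSON, so after the patch above the relevant keys would look roughly like this.

    # Illustrative shape of Patroni's GET /config response after the patch;
    # values mirror pg_patch_config above, unrelated keys omitted.
    effective_config = {
        "ttl": 29,
        "loop_wait": 9,
        "retry_timeout": 9,
        "synchronous_mode": True,
        "slots": {"test_slot": {"type": "physical"}},
        "postgresql": {"parameters": {"max_connections": "50"}},
    }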
2 changes: 1 addition & 1 deletion pkg/apis/acid.zalan.do/v1/marshal.go
@@ -81,7 +81,7 @@ func (ps *PostgresStatus) UnmarshalJSON(data []byte) error {
if err != nil {
metaErr := json.Unmarshal(data, &status)
if metaErr != nil {
return fmt.Errorf("Could not parse status: %v; err %v", string(data), metaErr)
return fmt.Errorf("could not parse status: %v; err %v", string(data), metaErr)
}
tmp.PostgresClusterStatus = status
}
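Side note on the marshal.go change: lowercasing the error string follows the Go convention that error strings should not be capitalized (the rule golint and staticcheck's ST1005 flag), since errors are frequently wrapped into longer messages.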