minor changes to e2e test
parent 3bed6bce6d
commit 6847a710cf
@@ -1426,13 +1426,8 @@ class EndToEndTestCase(unittest.TestCase):
            via restarting cluster through Patroni's rest api
         '''
         k8s = self.k8s
-        cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
-        labels = 'spilo-role=master,' + cluster_label
-        new_max_connections_value = "50"
-        pods = k8s.api.core_v1.list_namespaced_pod(
-            'default', label_selector=labels).items
-        self.assert_master_is_unique()
-        masterPod = pods[0]
+        masterPod = k8s.get_cluster_leader_pod()
+        labels = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master'
         creationTimestamp = masterPod.metadata.creation_timestamp
 
         # adjust max_connection
@@ -1440,7 +1435,7 @@ class EndToEndTestCase(unittest.TestCase):
             "spec": {
                 "postgresql": {
                     "parameters": {
-                        "max_connections": new_max_connections_value
+                        "max_connections": "50"
                     }
                 },
                 "patroni": {
@@ -1456,6 +1451,8 @@ class EndToEndTestCase(unittest.TestCase):
                 }
             }
         }
 
+        try:
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)
@@ -1468,7 +1465,7 @@ class EndToEndTestCase(unittest.TestCase):
                 effective_parameters = effective_config["postgresql"]["parameters"]
                 self.assertEqual(desired_parameters["max_connections"], effective_parameters["max_connections"],
                                  "max_connectoins not updated")
-                self.assertTrue(effective_config["slots"] is not None)
+                self.assertTrue(effective_config["slots"] is not None, "physical replication slot not added")
                 self.assertEqual(desired_patroni["ttl"], effective_config["ttl"],
                                  "ttl not updated")
                 self.assertEqual(desired_patroni["loop_wait"], effective_config["loop_wait"],
@@ -1479,15 +1476,23 @@ class EndToEndTestCase(unittest.TestCase):
                                  "synchronous_mode not updated")
                 return True
 
-            # make sure that max_connections decreased
             self.eventuallyTrue(compare_config, "Postgres config not applied")
             pods = k8s.api.core_v1.list_namespaced_pod(
                 'default', label_selector=labels).items
 
-            # make sure that Postgres was not restarted in Pod
+            # make sure that pod wasn't recreated
             self.assertEqual(creationTimestamp, masterPod.metadata.creation_timestamp,
                              "Master pod creation timestamp is updated")
 
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
+        # make sure cluster is in a good state for further tests
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2,
+                             "No 2 pods running")
+
     def get_failover_targets(self, master_node, replica_nodes):
         '''
            If all pods live on the same node, failover will happen to other worker(s)
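
Taken together, the hunks wrap the config patch and its verification in a try/except that prints the operator log on timeout, and add post-checks that the operator is idle and both pods are running again. The verification itself relies on the suite's eventuallyTrue/eventuallyEqual helpers, which poll until a condition holds. A minimal, self-contained sketch of that polling pattern, assuming a hypothetical patroni_config() helper that would query the leader pod's Patroni REST config endpoint; the suite's real helpers are methods on the test case and may retry differently:

import time


def eventually_true(predicate, message, retries=30, interval=2):
    # Poll until predicate() returns truthy; otherwise fail with `message`.
    # This only sketches the idea behind the suite's eventuallyTrue/eventuallyEqual
    # helpers, which are methods on the test case.
    last_error = None
    for _ in range(retries):
        try:
            if predicate():
                return True
        except AssertionError as err:  # an assertion raised inside the predicate means "not yet"
            last_error = err
        time.sleep(interval)
    raise AssertionError("{}: {}".format(message, last_error))


# Usage sketch: wait until Patroni reports the patched max_connections value.
# patroni_config() is a hypothetical stand-in for querying the leader pod's
# Patroni REST /config endpoint.
# eventually_true(
#     lambda: patroni_config()["postgresql"]["parameters"]["max_connections"] == "50",
#     "Postgres config not applied")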