More e2e changes for scale up and down.

Jan Mußler 2020-10-21 17:58:16 +02:00
parent 39641e81ea
commit 6b91bd3282
2 changed files with 42 additions and 63 deletions


@@ -317,19 +317,14 @@ class K8sBase:
             return False
         return True

-    def wait_for_pg_to_scale(self, number_of_instances, namespace='default'):
+    def scale_cluster(self, number_of_instances, name="acid-minimal-cluster", namespace="default"):
         body = {
             "spec": {
                 "numberOfInstances": number_of_instances
             }
         }
-        _ = self.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)
-
-        labels = 'application=spilo,cluster-name=acid-minimal-cluster'
-        while self.count_pods_with_label(labels) != number_of_instances:
-            time.sleep(self.RETRY_TIMEOUT_SEC)
+        self.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", namespace, "postgresqls", name, body)

     def wait_for_running_pods(self, labels, number, namespace=''):
         while self.count_pods_with_label(labels) != number:
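
Note that the renamed scale_cluster() no longer blocks until the pods appear: it only patches numberOfInstances and returns, leaving synchronization to the caller. As a standalone illustration of the same patch with the official kubernetes Python client (the kubeconfig context and cluster name here are assumptions):

    from kubernetes import client, config

    config.load_kube_config()  # use the current kubectl context
    api = client.CustomObjectsApi()

    # Patch the postgresql custom resource; the operator reconciles the
    # statefulset to the new size asynchronously.
    body = {"spec": {"numberOfInstances": 3}}
    api.patch_namespaced_custom_object(
        "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", body)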


@@ -574,25 +574,20 @@ class EndToEndTestCase(unittest.TestCase):
         Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
         '''
         k8s = self.k8s
-        labels = "application=spilo,cluster-name=acid-minimal-cluster"
+        pod = "acid-minimal-cluster-0"

-        try:
-            k8s.wait_for_pg_to_scale(3)
-            self.assertEqual(3, k8s.count_pods_with_label(labels))
-            self.assert_master_is_unique()
+        k8s.scale_cluster(3)
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 nodes healthy")

-            k8s.wait_for_pg_to_scale(2)
-            self.assertEqual(2, k8s.count_pods_with_label(labels))
-            self.assert_master_is_unique()
-
-        except timeout_decorator.TimeoutError:
-            print('Operator log: {}'.format(k8s.get_operator_log()))
-            raise
+        k8s.scale_cluster(2)
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed")
+        self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all 2 members healthy")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_service_annotations(self):
         '''
         Create a Postgres cluster with service annotations and check them.
         '''
         k8s = self.k8s
         patch_custom_service_annotations = {
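
The eventuallyEqual/eventuallyTrue helpers that replace the try/except blocks are not part of this diff. A plausible sketch of the polling assertion, assuming a retry budget and sleep interval (both parameter names and defaults are assumptions):

    import time

    def eventuallyEqual(self, f, x, m, retries=60, interval=2):
        # Re-evaluate f() until it equals x or the retry budget runs out;
        # the last AssertionError propagates with message m, failing the test.
        while True:
            try:
                self.assertEqual(f(), x, m)
                return True
            except AssertionError:
                retries -= 1
                if retries <= 0:
                    raise
                time.sleep(interval)
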
@@ -602,32 +597,24 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(patch_custom_service_annotations)

-        try:
-            pg_patch_custom_annotations = {
-                "spec": {
-                    "serviceAnnotations": {
-                        "annotation.key": "value",
-                        "foo": "bar",
-                    }
-                }
+        pg_patch_custom_annotations = {
+            "spec": {
+                "serviceAnnotations": {
+                    "annotation.key": "value",
+                    "foo": "bar",
+                }
             }
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)
+        }
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)

-            # wait a little before proceeding
-            time.sleep(30)
-            annotations = {
-                "annotation.key": "value",
-                "foo": "bar",
-            }
-            self.assertTrue(k8s.check_service_annotations(
-                "cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
-            self.assertTrue(k8s.check_service_annotations(
-                "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))
-
-        except timeout_decorator.TimeoutError:
-            print('Operator log: {}'.format(k8s.get_operator_log()))
-            raise
+        annotations = {
+            "annotation.key": "value",
+            "foo": "bar",
+        }
+
+        self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
+        self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))

         # clean up
         unpatch_custom_service_annotations = {
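
check_service_annotations() itself is not shown in this diff. A hedged sketch of what such a helper might do with the CoreV1Api, treating the expected annotations as a subset of what the operator set (the helper name, namespace default, and internals are assumptions):

    from kubernetes import client

    def check_service_annotations(label_selector, expected, namespace="default"):
        # List the cluster's services by label and require every expected
        # key/value pair to be present on each of them.
        core_v1 = client.CoreV1Api()
        services = core_v1.list_namespaced_service(namespace, label_selector=label_selector)
        for svc in services.items:
            actual = svc.metadata.annotations or {}
            if any(actual.get(k) != v for k, v in expected.items()):
                return False
        return True
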
@@ -652,27 +639,24 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(patch_sset_propagate_annotations)

-        try:
-            pg_crd_annotations = {
-                "metadata": {
-                    "annotations": {
-                        "deployment-time": "2020-04-30 12:00:00",
-                        "downscaler/downtime_replicas": "0",
-                    },
-                }
+        pg_crd_annotations = {
+            "metadata": {
+                "annotations": {
+                    "deployment-time": "2020-04-30 12:00:00",
+                    "downscaler/downtime_replicas": "0",
+                },
             }
-            k8s.api.custom_objects_api.patch_namespaced_custom_object(
-                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
+        }
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)

-            annotations = {
-                "deployment-time": "2020-04-30 12:00:00",
-                "downscaler/downtime_replicas": "0",
-            }
-            self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))
-
-        except timeout_decorator.TimeoutError:
-            print('Operator log: {}'.format(k8s.get_operator_log()))
-            raise
+        annotations = {
+            "deployment-time": "2020-04-30 12:00:00",
+            "downscaler/downtime_replicas": "0",
+        }
+        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations))

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_taint_based_eviction(self):
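
The statefulset check used above would follow the same pattern against the AppsV1Api (a sketch under the same assumptions as the service-annotation check):

    from kubernetes import client

    def check_statefulset_annotations(label_selector, expected, namespace="default"):
        # Same subset check as for services, but against statefulset metadata.
        apps_v1 = client.AppsV1Api()
        ssets = apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector)
        return all(
            (sset.metadata.annotations or {}).get(k) == v
            for sset in ssets.items
            for k, v in expected.items()
        )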