enhance e2e test

Rafia Sabih 2020-11-02 14:48:02 +01:00
parent 5685924cdf
commit 65d547f740
1 changed file with 29 additions and 24 deletions


@@ -105,7 +105,7 @@ class EndToEndTestCase(unittest.TestCase):
with open("manifests/postgres-operator.yaml", 'r+') as f:
operator_deployment = yaml.safe_load(f)
operator_deployment["spec"]["template"]["spec"]["containers"][0]["image"] = os.environ['OPERATOR_IMAGE']
with open("manifests/postgres-operator.yaml", 'w') as f:
yaml.dump(operator_deployment, f, Dumper=yaml.Dumper)
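The hunk above swaps the operator image in the deployment manifest before the test run. A minimal self-contained sketch of that read-modify-write, assuming OPERATOR_IMAGE is exported and the manifest path is the one shown above:

import os
import yaml

def override_operator_image(manifest_path="manifests/postgres-operator.yaml"):
    # read the deployment manifest, replace the image of the first (only)
    # container with the locally built operator image, and write it back
    with open(manifest_path, "r") as f:
        deployment = yaml.safe_load(f)
    deployment["spec"]["template"]["spec"]["containers"][0]["image"] = os.environ["OPERATOR_IMAGE"]
    with open(manifest_path, "w") as f:
        yaml.dump(deployment, f, Dumper=yaml.Dumper)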
@@ -180,12 +180,14 @@ class EndToEndTestCase(unittest.TestCase):
{
'spec': {
'enableConnectionPooler': True,
'enableReplicaConnectionPooler': True,
}
})
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 2, "Deployment replicas is 2 by default")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 2, "No pooler pods found")
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 1, "No pooler service found")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler-name=acid-minimal-cluster-pooler"), 2, "No pooler pods found")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler-name=acid-minimal-cluster-pooler-repl"), 2, "No pooler replica pods found")
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 2, "No pooler service found")
# scale up connection pooler deployment
k8s.api.custom_objects_api.patch_namespaced_custom_object(
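The scale-up patch opened on the last line above has its body elided between hunks. One plausible shape, using the kubernetes CustomObjectsApi directly and assuming connectionPooler.numberOfInstances is the spec field that drives the pooler deployment size (the field name is an assumption, not shown in this diff):

from kubernetes import client, config

config.load_kube_config()  # assumes a kubeconfig pointing at the e2e test cluster
custom_api = client.CustomObjectsApi()

# scale the connection pooler deployment from its default of 2 to 3 instances
custom_api.patch_namespaced_custom_object(
    "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
    {
        "spec": {
            "connectionPooler": {
                "numberOfInstances": 3,
            }
        }
    })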
@@ -199,8 +201,8 @@ class EndToEndTestCase(unittest.TestCase):
}
})
self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 3, "Deployment replicas is scaled to 3")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 3, "Scale up of pooler pods does not work")
# self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(), 3, "Deployment replicas is scaled to 3")
# self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 3, "Scale up of pooler pods does not work")
# turn it off, keeping config should be overwritten by false
k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -208,11 +210,13 @@ class EndToEndTestCase(unittest.TestCase):
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableConnectionPooler': False
'enableConnectionPooler': False,
'enableReplicaConnectionPooler': False,
}
})
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 0, "Pooler pods not scaled down")
self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler-name=acid-minimal-cluster-pooler"), 0, "Pooler pods not scaled down")
self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 0, "Pooler service not removed")
# Verify that all the databases have pooler schema installed.
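The assertions above and below rely on the harness helper count_running_pods. A minimal sketch of what such a helper can look like, counting pods by label selector with the core v1 API (the Running-phase filter is an assumption about the helper, not taken from this diff):

from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

def count_running_pods(label_selector, namespace="default"):
    # count pods matching the given label that are in phase Running
    pods = core_v1.list_namespaced_pod(namespace, label_selector=label_selector).items
    return len([p for p in pods if p.status.phase == "Running"])

count_running_pods("connection-pooler-name=acid-minimal-cluster-pooler")  # expected 0 after disabling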
@@ -273,7 +277,8 @@ class EndToEndTestCase(unittest.TestCase):
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'connectionPooler': None
'connectionPooler': None,
'enableReplicaConnectionPooler': False,
}
})
@@ -300,7 +305,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")),
'LoadBalancer',
"Expected LoadBalancer service type for master, found {}")
@@ -318,7 +323,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
self.eventuallyEqual(lambda: k8s.get_service_type(cluster_label.format("master")),
'ClusterIP',
"Expected LoadBalancer service type for master, found {}")
@@ -383,7 +388,7 @@ class EndToEndTestCase(unittest.TestCase):
return False
self.eventuallyTrue(verify_role, "infrastructure role setup is not loaded")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -417,7 +422,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(patch_lazy_spilo_upgrade, step="Init baseline image version")
self.eventuallyEqual(lambda: k8s.get_statefulset_image(), SPILO_CURRENT, "Statefulset not updated initially")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running")
@@ -437,10 +442,10 @@ class EndToEndTestCase(unittest.TestCase):
try:
# restart the pod to get a container with the new image
k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
# verify only pod-0 which was deleted got new image from statefulset
self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Delete pod-0 did not get new spilo image")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running")
self.assertNotEqual(lambda: k8s.get_effective_pod_image(pod1), SPILO_CURRENT, "pod-1 should not have changed Docker image to {}".format(SPILO_CURRENT))
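The lazy upgrade hunk deletes pod-0 and then compares the effective images of pod-0 and pod-1. A minimal sketch of both steps with the core v1 API; the pod name follows the acid-minimal-cluster statefulset naming used elsewhere in this test:

from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

def get_effective_pod_image(pod_name, namespace="default"):
    # image actually set on the (single) spilo container of the pod
    pod = core_v1.read_namespaced_pod(pod_name, namespace)
    return pod.spec.containers[0].image

# delete pod-0 so the statefulset controller recreates it with the new image;
# pod-1 keeps running and should keep the old image under the lazy upgrade
core_v1.delete_namespaced_pod("acid-minimal-cluster-0", "default")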
@@ -511,7 +516,7 @@ class EndToEndTestCase(unittest.TestCase):
def get_docker_image():
jobs = k8s.get_logical_backup_job().items
return jobs[0].spec.job_template.spec.template.spec.containers[0].image
self.eventuallyEqual(get_docker_image, image,
"Expected job image {}, found {}".format(image, "{}"))
@@ -523,7 +528,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)
self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 0, "failed to remove logical backup job")
except timeout_decorator.TimeoutError:
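The pg_patch_disable_backup body is cut off in the hunk above. One plausible shape, assuming enableLogicalBackup is the postgresql spec field being toggled (the field name is an assumption based on the test's intent, not shown in this diff):

# patch sketch only; applied with the same patch_namespaced_custom_object call as above
pg_patch_disable_backup = {
    "spec": {
        "enableLogicalBackup": False,
    }
}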
@@ -569,13 +574,13 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
k8s.patch_statefulset({"metadata":{"annotations":{"zalando-postgres-operator-rolling-update-required": "False"}}})
k8s.update_config(patch_min_resource_limits, "Minimum resource test")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after minimum resource update")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")
def verify_pod_limits():
pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
if len(pods)<2:
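verify_pod_limits above is only partially visible. A minimal self-contained sketch of the same check, reading the resource limits of the postgres container on each spilo pod; the expected minimum values are illustrative, not taken from this diff:

from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

def verify_pod_limits(min_cpu="250m", min_memory="250Mi", namespace="default"):
    pods = core_v1.list_namespaced_pod(
        namespace, label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
    if len(pods) < 2:
        return False
    # every spilo pod must carry exactly the configured minimum limits
    return all(
        p.spec.containers[0].resources.limits["cpu"] == min_cpu and
        p.spec.containers[0].resources.limits["memory"] == min_memory
        for p in pods)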
@@ -672,10 +677,10 @@ class EndToEndTestCase(unittest.TestCase):
k8s = self.k8s
pod="acid-minimal-cluster-0"
k8s.scale_cluster(3)
self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 nodes healthy")
k8s.scale_cluster(2)
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all 2 members healthy")
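scale_cluster above patches the cluster size, and the assertions wait for the pod count and Patroni membership to follow. A minimal sketch of such a helper, assuming numberOfInstances is the spec field the operator reconciles into the statefulset replica count (an assumption, not shown in this diff):

from kubernetes import client, config

config.load_kube_config()
custom_api = client.CustomObjectsApi()

def scale_cluster(number_of_instances, namespace="default"):
    custom_api.patch_namespaced_custom_object(
        "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster",
        {"spec": {"numberOfInstances": number_of_instances}})

scale_cluster(3)  # then eventually expect 3 running pods and 3 healthy Patroni members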
@@ -746,12 +751,12 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
annotations = {
"deployment-time": "2020-04-30 12:00:00",
"downscaler/downtime_replicas": "0",
}
self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
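check_statefulset_annotations above verifies that the annotations patched onto the postgresql manifest are propagated to the statefulset. A minimal sketch using the apps v1 API and the spilo cluster label used throughout this test:

from kubernetes import client, config

config.load_kube_config()
apps_v1 = client.AppsV1Api()

def check_statefulset_annotations(label_selector, expected, namespace="default"):
    ssets = apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector).items
    if not ssets:
        return False
    annotations = ssets[0].metadata.annotations or {}
    # every expected annotation must be present with the exact value
    return all(annotations.get(k) == v for k, v in expected.items())

check_statefulset_annotations(
    "cluster-name=acid-minimal-cluster,application=spilo",
    {"deployment-time": "2020-04-30 12:00:00", "downscaler/downtime_replicas": "0"})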
@@ -796,7 +801,7 @@ class EndToEndTestCase(unittest.TestCase):
"toleration": "key:postgres,operator:Exists,effect:NoExecute"
}
}
k8s.update_config(patch_toleration_config, step="allow tainted nodes")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
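update_config above changes the operator configuration so that the cluster pods tolerate the postgres taint. A minimal sketch of that step, assuming the operator is configured through the postgres-operator ConfigMap (name and namespace are assumptions; the e2e helper may also bounce the operator pod so the new setting is picked up):

from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

# write the toleration into the operator configuration
core_v1.patch_namespaced_config_map(
    "postgres-operator", "default",
    {"data": {"toleration": "key:postgres,operator:Exists,effect:NoExecute"}})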