diff --git a/e2e/scripts/watch_objects.sh b/e2e/scripts/watch_objects.sh index d4ca3597f..9dde54f5e 100755 --- a/e2e/scripts/watch_objects.sh +++ b/e2e/scripts/watch_objects.sh @@ -13,9 +13,15 @@ kubectl get statefulsets echo kubectl get deployments echo +echo +echo 'Step from operator deployment' kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}' echo +echo +echo 'Spilo Image in statefulset' kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}' echo -kubectl exec -it $(kubectl get pods -l name=postgres-operator -o jsonpath='{.items.name}') -- curl localhost:8008/api/status/queue/ -" \ No newline at end of file +echo +echo 'Queue Status' +kubectl exec -it \$(kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.name}') -- curl localhost:8080/workers/all/status/ +echo" \ No newline at end of file diff --git a/e2e/tests/k8s_api.py b/e2e/tests/k8s_api.py index 371fa8e0d..f2abd8e0c 100644 --- a/e2e/tests/k8s_api.py +++ b/e2e/tests/k8s_api.py @@ -239,6 +239,19 @@ class K8s: return [] return json.loads(r.stdout.decode()) + def get_operator_state(self): + pod = self.get_operator_pod() + if pod is None: + return None + pod = pod.metadata.name + + r = self.exec_with_kubectl(pod, "curl localhost:8080/workers/all/status/") + if r.returncode != 0 or not r.stdout.decode().startswith("{"): + return None + + return json.loads(r.stdout.decode()) + + def get_patroni_running_members(self, pod="acid-minimal-cluster-0"): result = self.get_patroni_state(pod) return list(filter(lambda x: "State" in x and x["State"] == "running", result)) diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index a38169a46..5f1bbcac7 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -347,6 +347,7 @@ class EndToEndTestCase(unittest.TestCase): }, } k8s.update_config(patch_infrastructure_roles) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
try: # check that new roles are represented in the config by requesting the @@ -456,6 +457,7 @@ class EndToEndTestCase(unittest.TestCase): # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Rolling upgrade was not executed", 50, 3) self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), conf_image, "Rolling upgrade was not executed", 50, 3) + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running") except timeout_decorator.TimeoutError: print('Operator log: {}'.format(k8s.get_operator_log())) @@ -528,6 +530,9 @@ class EndToEndTestCase(unittest.TestCase): print('Operator log: {}'.format(k8s.get_operator_log())) raise + # ensure cluster is healthy after tests + self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") + @timeout_decorator.timeout(TEST_TIMEOUT_SEC) def test_min_resource_limits(self): '''