Small changes to e2e.
This commit is contained in:
parent
9e932f51ef
commit
5593c1f485
|
|
@ -13,9 +13,15 @@ kubectl get statefulsets
|
|||
echo
|
||||
kubectl get deployments
|
||||
echo
|
||||
echo
|
||||
echo 'Step from operator deployment'
|
||||
kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}'
|
||||
echo
|
||||
echo
|
||||
echo 'Spilo Image in statefulset'
|
||||
kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}'
|
||||
echo
|
||||
kubectl exec -it $(kubectl get pods -l name=postgres-operator -o jsonpath='{.items.name}') -- curl localhost:8008/api/status/queue/
|
||||
"
|
||||
echo
|
||||
echo 'Queue Status'
|
||||
kubectl exec -it \$(kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.name}') -- curl localhost:8080/workers/all/status/
|
||||
echo"
|
||||
|
|
@ -239,6 +239,19 @@ class K8s:
|
|||
return []
|
||||
return json.loads(r.stdout.decode())
|
||||
|
||||
def get_operator_state(self):
    """Return the operator's worker-status dict, or None if unavailable.

    Looks up the operator pod and queries its ``/workers/all/status/``
    endpoint via ``kubectl exec`` + curl, parsing the JSON response.

    Returns:
        dict parsed from the endpoint's JSON body, or None when the
        operator pod is missing, the curl command fails, or the response
        is not a JSON object (e.g. curl error text).
    """
    pod = self.get_operator_pod()
    # `is None` (identity), not `== None` — PEP 8 singleton comparison
    if pod is None:
        return None
    pod_name = pod.metadata.name

    r = self.exec_with_kubectl(pod_name, "curl localhost:8080/workers/all/status/")
    # decode once and reuse; original decoded stdout twice
    body = r.stdout.decode()
    # non-zero exit or a payload that isn't a JSON object means
    # "state not available" rather than an error we should raise
    if r.returncode != 0 or not body.startswith("{"):
        return None

    return json.loads(body)
|
||||
|
||||
|
||||
def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
    """Return the Patroni member entries reported as 'running' for *pod*."""
    members = self.get_patroni_state(pod)
    # keep only entries that carry a State key with value "running"
    return [m for m in members if "State" in m and m["State"] == "running"]
|
||||
|
|
|
|||
|
|
@ -347,6 +347,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
},
|
||||
}
|
||||
k8s.update_config(patch_infrastructure_roles)
|
||||
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
|
||||
|
||||
try:
|
||||
# check that new roles are represented in the config by requesting the
|
||||
|
|
@ -456,6 +457,7 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
# so we additionally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
|
||||
self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Rolling upgrade was not executed", 50, 3)
|
||||
self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), conf_image, "Rolling upgrade was not executed", 50, 3)
|
||||
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod0)), 2, "Postgres status did not enter running")
|
||||
|
||||
except timeout_decorator.TimeoutError:
|
||||
print('Operator log: {}'.format(k8s.get_operator_log()))
|
||||
|
|
@ -528,6 +530,9 @@ class EndToEndTestCase(unittest.TestCase):
|
|||
print('Operator log: {}'.format(k8s.get_operator_log()))
|
||||
raise
|
||||
|
||||
# ensure cluster is healthy after tests
|
||||
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
|
||||
|
||||
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
|
||||
def test_min_resource_limits(self):
|
||||
'''
|
||||
|
|
|
|||
Loading…
Reference in New Issue