Fix yaml dump; clear pending rolling-update annotation between tests.

This commit is contained in:
Jan Mußler 2020-10-23 01:09:02 +02:00
parent aa3100ca34
commit d88e62fc79
3 changed files with 28 additions and 1 deletions

View File

@ -3,7 +3,15 @@
# Live e2e debugging dashboard: every refresh shows the postgresql CRs, the
# rolling-update-required annotation on the statefulsets, the pods, the
# operator's last recorded "step" annotation, and the Spilo images in use.
watch -c "
kubectl get postgresql
echo
echo -n 'Rolling upgrade pending: '
kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}'
echo
echo
kubectl get pods
echo
kubectl get statefulsets
echo
kubectl get pods -l name=postgres-operator -o jsonpath='{.items..metadata.annotations.step}'
echo
kubectl get pods -l application=spilo -o jsonpath='{.items..spec.containers..image}'
"

View File

@ -75,6 +75,11 @@ class K8s:
namespace='default'
)
def pg_get_status(self, name="acid-minimal-cluster", namespace="default"):
    """Fetch the postgresql custom resource and return its
    ``status.PostgresClusterStatus`` field, or ``None`` when the status
    has not been populated yet."""
    cluster = self.api.custom_objects_api.get_namespaced_custom_object(
        "acid.zalan.do", "v1", namespace, "postgresqls", name)
    status = cluster.get("status", {})
    return status.get("PostgresClusterStatus", None)
def wait_for_pod_start(self, pod_labels, namespace='default'):
pod_phase = 'No pod running'
while pod_phase != 'Running':
@ -194,6 +199,9 @@ class K8s:
self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
self.delete_operator_pod(step=step)
def patch_statefulset(self, data, name="acid-minimal-cluster", namespace="default"):
    """Patch the cluster's StatefulSet in-place with the given body."""
    apps_api = self.api.apps_v1
    apps_api.patch_namespaced_stateful_set(name, namespace, data)
def create_with_kubectl(self, path):
return subprocess.run(
["kubectl", "apply", "-f", path],

View File

@ -97,6 +97,8 @@ class EndToEndTestCase(unittest.TestCase):
with open("manifests/postgres-operator.yaml", 'r+') as f:
operator_deployment = yaml.safe_load(f)
operator_deployment["spec"]["template"]["spec"]["containers"][0]["image"] = os.environ['OPERATOR_IMAGE']
with open("manifests/postgres-operator.yaml", 'w') as f:
yaml.dump(operator_deployment, f, Dumper=yaml.Dumper)
for filename in ["operator-service-account-rbac.yaml",
@ -465,6 +467,7 @@ class EndToEndTestCase(unittest.TestCase):
Lower resource limits below configured minimum and let operator fix it
'''
k8s = self.k8s
# self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Cluster not healthy at start")
# configure minimum boundaries for CPU and memory limits
minCPULimit = '503m'
@ -476,7 +479,6 @@ class EndToEndTestCase(unittest.TestCase):
"min_memory_limit": minMemoryLimit
}
}
k8s.update_config(patch_min_resource_limits)
# lower resource limits below minimum
pg_patch_resources = {
@ -495,6 +497,9 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
k8s.patch_statefulset({"metadata":{"annotations":{"zalando-postgres-operator-rolling-update-required": "False"}}})
k8s.update_config(patch_min_resource_limits, "Minimum resource test")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No two pods running after lazy rolling upgrade")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members()), 2, "Postgres status did not enter running")
@ -509,6 +514,12 @@ class EndToEndTestCase(unittest.TestCase):
self.eventuallyTrue(verify_pod_limits, "Pod limits where not adjusted")
@classmethod
def setUp(cls):
    '''
    Reset the pending rolling-update marker before each test so a rolling
    upgrade triggered by one test does not leak into the next one.
    '''
    # Kubernetes objects carry annotations under "metadata" (not "meta"),
    # and annotation values must be strings — use "False" to match the
    # string value the operator writes elsewhere in this suite.
    cls.k8s.patch_statefulset(
        {"metadata": {"annotations": {"zalando-postgres-operator-rolling-update-required": "False"}}})
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_multi_namespace_support(self):
'''