Check for observedGeneration in e2e

Signed-off-by: Mikkel Oscar Lyderik Larsen <mikkel.larsen@zalando.de>
Mikkel Oscar Lyderik Larsen 2025-12-05 19:50:49 +01:00
parent f4e901b74c
commit 7129282c2a
2 changed files with 42 additions and 6 deletions


@@ -92,10 +92,13 @@ class K8s:
             namespace='default'
         )
 
-    def pg_get_status(self, name="acid-minimal-cluster", namespace="default"):
+    def pg_get(self, name="acid-minimal-cluster", namespace="default"):
         pg = self.api.custom_objects_api.get_namespaced_custom_object(
             "acid.zalan.do", "v1", namespace, "postgresqls", name)
-        return pg.get("status", {}).get("PostgresClusterStatus", None)
+        return pg
+
+    def pg_get_status(self, name="acid-minimal-cluster", namespace="default"):
+        return self.pg_get(name, namespace).get("status", {})
 
     def wait_for_pod_start(self, pod_labels, namespace='default'):
         pod_phase = 'No pod running'
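
A standalone sketch of the helper contract after this change, using a hand-built dict in place of the postgresql custom object that pg_get() would fetch from the cluster (all values below are illustrative, not taken from a live cluster):

# Illustrative only: mimics the shapes the refactored helpers return.
pg = {
    "metadata": {"name": "acid-minimal-cluster", "generation": 2},
    "status": {"PostgresClusterStatus": "Running", "observedGeneration": 2},
}

# pg_get() returns the whole object; pg_get_status() now returns just pg["status"].
status = pg.get("status", {})
assert status == {"PostgresClusterStatus": "Running", "observedGeneration": 2}

# The pre-refactor pg_get_status() returned only the state string:
assert status.get("PostgresClusterStatus", None) == "Running"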


@@ -71,6 +71,19 @@ class EndToEndTestCase(unittest.TestCase):
                     raise
                 time.sleep(interval)
 
+    def eventuallyTrueFunc(self, f, xf, m, retries=60, interval=2):
+        while True:
+            try:
+                y = f()
+                x = xf(y)
+                self.assertTrue(x, m)
+                return True
+            except AssertionError:
+                retries = retries - 1
+                if not retries > 0:
+                    raise
+                time.sleep(interval)
+
     @classmethod
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def setUpClass(cls):
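
A self-contained sketch of the retry behaviour the new eventuallyTrueFunc helper provides; the test class RetryDemo and the flaky() function are invented stand-ins for a real e2e test and a live cluster call, and only the standard library is used:

# Illustration of the eventuallyTrueFunc retry loop with a made-up flaky call.
import time
import unittest


class RetryDemo(unittest.TestCase):
    def eventuallyTrueFunc(self, f, xf, m, retries=60, interval=2):
        # f fetches a value, xf is the predicate applied to it; retry until
        # the predicate holds or the retry budget is exhausted.
        while True:
            try:
                y = f()
                x = xf(y)
                self.assertTrue(x, m)
                return True
            except AssertionError:
                retries = retries - 1
                if not retries > 0:
                    raise
                time.sleep(interval)

    def test_predicate_eventually_holds(self):
        calls = {"n": 0}

        def flaky():
            # Returns 1, 2, 3, ... so the predicate only passes on the third poll.
            calls["n"] += 1
            return calls["n"]

        self.eventuallyTrueFunc(flaky, lambda n: n >= 3,
                                "value never reached 3", retries=5, interval=0)


if __name__ == "__main__":
    unittest.main()
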
@@ -559,7 +572,7 @@ class EndToEndTestCase(unittest.TestCase):
        pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
        del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)
@@ -576,7 +589,7 @@ class EndToEndTestCase(unittest.TestCase):
        self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query%("database", slot_to_change))[0], "bar",
                             "The replication slot cannot be updated", 10, 5)

        # make sure slot from Patroni didn't get deleted
        self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query%("slot_name", patroni_slot))), 1,
                             "The replication slot from Patroni gets deleted", 10, 5)
@@ -1670,6 +1683,13 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.get_deployment_replica_count(name=pooler_name), 2,
                              "Operator did not succeed in overwriting labels")
 
+        # status observedGeneration should match metadata.generation
+        self.eventuallyTrueFunc(
+            lambda: k8s.pg_get(),
+            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
+            "Expected generation and status.observedGeneration to match",
+        )
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresqls', 'acid-minimal-cluster',
@@ -1683,6 +1703,13 @@ class EndToEndTestCase(unittest.TestCase):
         self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler="+pooler_name),
                              0, "Pooler pods not scaled down")
+
+        # status observedGeneration should match metadata.generation
+        self.eventuallyTrueFunc(
+            lambda: k8s.pg_get(),
+            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
+            "Expected generation and status.observedGeneration to match",
+        )
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_owner_references(self):
         '''
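
The generation check added above, pulled out as a standalone predicate and run against hand-made dicts (the values are invented for illustration): the operator is only considered caught up once status.observedGeneration equals metadata.generation.

# Standalone sketch of the predicate passed to eventuallyTrueFunc; the
# dicts stand in for postgresql custom objects and are not from a cluster.
def generation_observed(pg):
    return (pg.get("metadata", {}).get("generation", 0)
            == pg.get("status", {}).get("observedGeneration", -1))

caught_up = {"metadata": {"generation": 3}, "status": {"observedGeneration": 3}}
pending = {"metadata": {"generation": 4}, "status": {"observedGeneration": 3}}

assert generation_observed(caught_up)
assert not generation_observed(pending)
# Missing fields fall back to 0 vs. -1, so an unset status never matches:
assert not generation_observed({"metadata": {"generation": 1}})
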
@@ -2022,7 +2049,7 @@ class EndToEndTestCase(unittest.TestCase):
         # pod_label_wait_timeout should have been exceeded hence the rolling update is continued on next sync
         # check if the cluster state is "SyncFailed"
-        self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail")
+        self.eventuallyEqual(lambda: k8s.pg_get_status(), {"PostgresClusterStatus": "SyncFailed"}, "Expected SYNC event to fail")
 
         # wait for next sync, replica should be running normally by now and be ready for switchover
         k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
@@ -2037,7 +2064,13 @@ class EndToEndTestCase(unittest.TestCase):
         # status should again be "SyncFailed" but turn into "Running" on the next sync
         time.sleep(30)
-        self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Expected running cluster after two syncs")
+        self.eventuallyEqual(lambda: k8s.pg_get_status(), {"PostgresClusterStatus": "Running"}, "Expected running cluster after two syncs")
+        # status observedGeneration should match metadata.generation
+        self.eventuallyTrueFunc(
+            lambda: k8s.pg_get(),
+            lambda pg: pg.get("metadata", {}).get("generation", 0) == pg.get("status", {}).get("observedGeneration", -1),
+            "Expected generation and status.observedGeneration to match",
+        )
 
         # revert config changes
         patch_resync_config = {