merge origin

Sergey Dudoladov 2020-04-30 10:53:32 +02:00
commit a76ce1a25e
1 changed file with 15 additions and 12 deletions


@@ -549,7 +549,7 @@ class EndToEndTestCase(unittest.TestCase):
         self.assert_running_pods_have_volumes()

         # get extra unused pvcs to test Sync
-        k8s.wait_for_pg_to_scale(4)
+        k8s.wait_for_pg_to_scale(3)
         k8s.wait_for_pg_to_scale(2)

         # enable pvc deletion
@@ -561,9 +561,8 @@ class EndToEndTestCase(unittest.TestCase):
         k8s.update_config(patch)

         # Sync() at operator start-up deletes unused pvcs that had existed before
-        unused_pvcs = ["pgdata-acid-minimal-cluster-2", "pgdata-acid-minimal-cluster-3"]
-        for pvc in unused_pvcs:
-            k8s.wait_for_pvc_deletion(pvc)
+        unused_pvc = "pgdata-acid-minimal-cluster-2"
+        k8s.wait_for_pvc_deletion(unused_pvc)
         self.assert_running_pods_have_volumes()
@@ -571,7 +570,7 @@ class EndToEndTestCase(unittest.TestCase):
         last_pvc_name = "pgdata-acid-minimal-cluster-0"
         volume_before_scaledown = k8s.get_volume_name(last_pvc_name)
         k8s.wait_for_pg_to_scale(0)
-        k8s.update_config(patch) # force a Sync to delete unused PVCs
+        k8s.restart_operator() # force a Sync to delete unused PVCs
         self.assertTrue(k8s.pvc_exist(last_pvc_name), "The last pvc was deleted")

         # sanity check
@@ -594,7 +593,7 @@ class EndToEndTestCase(unittest.TestCase):
         # disablement of the feature actually stops volume deletion
         k8s.wait_for_pg_to_scale(2)
         self.assert_running_pods_have_volumes()
-        self.assertTrue(k8s.pvc_exist("pgdata-acid-minimal-cluster-2"),
+        self.assertTrue(k8s.pvc_exist(unused_pvc),
                         "The pvc of a shut down pod was deleted despite the feature is disabled")

     def get_failover_targets(self, master_node, replica_nodes):
@@ -678,11 +677,11 @@ class EndToEndTestCase(unittest.TestCase):
         pods = k8s.list_pods(labels)

         for pod in pods:
-            pgdata = [v for v in pod.spec.volumes if v.name == 'pgdata']
-            self.assertTrue(len(pgdata) > 0, "No pgdata volumes found")
-            if len(pgdata) > 0:
-                pvc = pgdata[0].persistent_volume_claim
-                self.assertTrue(k8s.pvc_exist(pvc.claim_name), "PVC does not exist")
+            if pod.metadata.labels.get('spilo-role') in ['master', 'replica']:
+                pgdata = [v for v in pod.spec.volumes if 'pgdata' in v.name]
+                if len(pgdata) > 0:
+                    pvc = pgdata[0].persistent_volume_claim
+                    self.assertTrue(k8s.pvc_exist(pvc.claim_name), "PVC does not exist")


 class K8sApi:
@@ -845,7 +844,11 @@ class K8s:
     def wait_for_logical_backup_job_creation(self):
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)

-    def delete_operator_pod(self):
+    def update_config(self, config_map_patch):
+        self.api.core_v1.patch_namespaced_config_map("postgres-operator", "default", config_map_patch)
+        self.restart_operator()
+
+    def restart_operator(self):
         operator_pod = self.api.core_v1.list_namespaced_pod(
             'default', label_selector="name=postgres-operator").items[0].metadata.name
         self.api.core_v1.delete_namespaced_pod(operator_pod, "default") # restart reloads the conf and issues Sync()
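
For orientation, a minimal usage sketch of the two helpers introduced in the last hunk, written against the K8s wrapper shown above. The ConfigMap key in the patch is a placeholder, since the actual option name is not part of this diff:

    # k8s is assumed to be an instance of the K8s helper class above.
    # "enable_pvc_deletion" is a placeholder key; the real toggle name is defined elsewhere.
    patch = {
        "data": {
            "enable_pvc_deletion": "true"
        }
    }

    # Patch the operator ConfigMap and restart the operator pod so the new
    # setting is loaded and a Sync() runs against existing clusters.
    k8s.update_config(patch)

    # To force another Sync() without changing the configuration,
    # restart the operator directly.
    k8s.restart_operator()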