More fixes for e2e tests.
commit ccde8c6bf6
parent 38e6261d64

@@ -35,6 +35,18 @@ class EndToEndTestCase(unittest.TestCase):
                     raise
                 time.sleep(interval)
 
+    def eventuallyNotEqual(self, f, x, m, retries=25, interval=2):
+        while True:
+            try:
+                y = f()
+                self.assertNotEqual(y, x, m.format(y))
+                return True
+            except AssertionError:
+                retries = retries - 1
+                if not retries > 0:
+                    raise
+                time.sleep(interval)
+
     def eventuallyTrue(self, f, m, retries=25, interval=2):
         while True:
             try:
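Note: the hunks below also call self.eventuallyEqual(...), which is not part of this diff and is assumed to be defined elsewhere in the test class (presumably introduced alongside these helpers). A minimal sketch of what such a counterpart would look like, mirroring eventuallyNotEqual above:

    def eventuallyEqual(self, f, x, m, retries=25, interval=2):
        # poll f() until it equals x; re-raise the last AssertionError
        # once the retry budget is exhausted
        while True:
            try:
                y = f()
                self.assertEqual(y, x, m.format(y))
                return True
            except AssertionError:
                retries = retries - 1
                if not retries > 0:
                    raise
                time.sleep(interval)
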
@@ -250,12 +262,11 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(patch_infrastructure_roles)
 
-        # wait a little before proceeding
-        time.sleep(30)
-
         try:
             # check that new roles are represented in the config by requesting the
             # operator configuration via API
+
+            def verify_role():
                 operator_pod = k8s.get_operator_pod()
                 get_config_cmd = "wget --quiet -O - localhost:8080/config"
                 result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
@@ -263,7 +274,7 @@ class EndToEndTestCase(unittest.TestCase):
                             .get("controller", {})
                             .get("InfrastructureRoles"))
 
-            self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
+                if "robot_zmon_acid_monitoring_new" in roles_dict:
                     role = roles_dict["robot_zmon_acid_monitoring_new"]
                     role.pop("Password", None)
                     self.assertDictEqual(role, {
@@ -274,6 +285,12 @@ class EndToEndTestCase(unittest.TestCase):
                         "AdminRole": "",
                         "Origin": 2,
                     })
+                    return True
+                else:
+                    return False
+
+            self.eventuallyTrue(verify_role, "infrastructure role setup is not loaded")
+
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
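The two hunks above replace the fixed time.sleep(30) with a predicate polled by eventuallyTrue: verify_role re-reads the operator's /config endpoint on every attempt and returns True only once the new infrastructure role shows up. A condensed sketch of the same pattern (names taken from the hunks above, body simplified):

    def verify_role():
        # re-query the operator config on every retry
        operator_pod = k8s.get_operator_pod()
        result = k8s.exec_with_kubectl(operator_pod.metadata.name,
                                       "wget --quiet -O - localhost:8080/config")
        roles_dict = (json.loads(result.stdout)
                      .get("controller", {})
                      .get("InfrastructureRoles"))
        return roles_dict is not None and "robot_zmon_acid_monitoring_new" in roles_dict

    self.eventuallyTrue(verify_role, "infrastructure role setup is not loaded")
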
@@ -309,17 +326,11 @@ class EndToEndTestCase(unittest.TestCase):
         try:
             # restart the pod to get a container with the new image
             k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
-            time.sleep(60)
 
-            # lazy update works if the restarted pod and older pods run different Spilo versions
-            new_image = k8s.get_effective_pod_image(pod0)
+            # verify only pod-0 which was deleted got new image from statefulset
+            self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Deleted pod-0 did not get the new spilo image")
             old_image = k8s.get_effective_pod_image(pod1)
-            self.assertNotEqual(new_image, old_image,
-                                "Lazy updated failed: pods have the same image {}".format(new_image))
-
-            # sanity check
-            assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
-            self.assertEqual(new_image, conf_image, assert_msg)
+            self.assertNotEqual(conf_image, old_image, "pod-1 should not have changed Docker image to {}".format(old_image))
 
             # clean up
             unpatch_lazy_spilo_upgrade = {
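The eventuallyNotEqual helper added in the first hunk is not exercised in the hunks shown here; a hypothetical use in the same spirit would be waiting for pod-1 to move off its current image once the forced rolling upgrade starts, e.g.:

    # hypothetical usage of eventuallyNotEqual; the single remaining {} in the
    # message is filled with the observed value by the helper's m.format(y)
    self.eventuallyNotEqual(lambda: k8s.get_effective_pod_image(pod1),
                            old_image,
                            "pod-1 still runs the old image {}")
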
@@ -332,15 +343,8 @@ class EndToEndTestCase(unittest.TestCase):
             # at this point operator will complete the normal rolling upgrade
             # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
 
-            # XXX there is no easy way to wait until the end of Sync()
-            time.sleep(60)
-
-            image0 = k8s.get_effective_pod_image(pod0)
-            image1 = k8s.get_effective_pod_image(pod1)
-
-            assert_msg = "Disabling lazy upgrade failed: pods still have different \
-                images {} and {}".format(image0, image1)
-            self.assertEqual(image0, image1, assert_msg)
+            self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0), conf_image, "Rolling upgrade was not executed")
+            self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod1), conf_image, "Rolling upgrade was not executed")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -372,12 +376,9 @@ class EndToEndTestCase(unittest.TestCase):
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup)
 
         try:
-            k8s.wait_for_logical_backup_job_creation()
+            self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 1, "failed to create logical backup job")
 
-            jobs = k8s.get_logical_backup_job().items
-            self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs)))
-
-            job = jobs[0]
+            job = k8s.get_logical_backup_job().items[0]
             self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster",
                              "Expected job name {}, found {}"
                              .format("logical-backup-acid-minimal-cluster", job.metadata.name))
@@ -394,10 +395,12 @@ class EndToEndTestCase(unittest.TestCase):
             }
             k8s.update_config(patch_logical_backup_image)
 
+            def get_docker_image():
                 jobs = k8s.get_logical_backup_job().items
-            actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image
-            self.assertEqual(actual_image, image,
-                             "Expected job image {}, found {}".format(image, actual_image))
+                return jobs[0].spec.job_template.spec.template.spec.containers[0].image
+
+            self.eventuallyEqual(get_docker_image, image,
+                             "Expected job image {}, found {}".format(image, "{}"))
 
             # delete the logical backup cron job
             pg_patch_disable_backup = {
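The message passed to eventuallyEqual above is only partially formatted on purpose: .format(image, "{}") fills the first placeholder with the expected image and re-inserts a literal "{}", so the helper can later substitute the value it actually observed (assuming eventuallyEqual formats its message with m.format(y), as eventuallyNotEqual does). For illustration, with a made-up image name:

    m = "Expected job image {}, found {}".format("registry.example.org/logical-backup:v1", "{}")
    # m == "Expected job image registry.example.org/logical-backup:v1, found {}"
    m.format("registry.example.org/logical-backup:v0")
    # == "Expected job image registry.example.org/logical-backup:v1, found registry.example.org/logical-backup:v0"
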
@@ -407,10 +410,8 @@ class EndToEndTestCase(unittest.TestCase):
             }
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)
-            k8s.wait_for_logical_backup_job_deletion()
-            jobs = k8s.get_logical_backup_job().items
-            self.assertEqual(0, len(jobs),
-                             "Expected 0 logical backup jobs, found {}".format(len(jobs)))
+
+            self.eventuallyEqual(lambda: len(k8s.get_logical_backup_job().items), 0, "failed to delete logical backup job")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -1040,6 +1041,9 @@ class K8s:
         '''
         pod = self.api.core_v1.list_namespaced_pod(
            namespace, label_selector="statefulset.kubernetes.io/pod-name=" + pod_name)
+
+        if len(pod.items) == 0:
+            return None
         return pod.items[0].spec.containers[0].image
 
 
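The new empty-list guard lets get_effective_pod_image return None while a deleted pod is still being rescheduled, instead of raising an IndexError on pod.items[0] (which the retry helpers would not catch). Combined with the polling assertions above, a call such as

    # returns None until the pod exists again; the polling helper simply
    # retries until the image finally matches the configured one
    self.eventuallyEqual(lambda: k8s.get_effective_pod_image(pod0),
                         conf_image,
                         "Deleted pod-0 did not get the new spilo image")

keeps retrying through the gap rather than erroring out mid-test.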