add try/except around the load balancer switch test

This commit is contained in:
Felix Kunde 2020-08-27 14:51:43 +02:00
parent baedeaf37c
commit 2da270af73
1 changed file with 43 additions and 37 deletions

View File

@ -159,45 +159,50 @@ class EndToEndTestCase(unittest.TestCase):
k8s = self.k8s k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
# enable load balancer services try:
pg_patch_enable_lbs = { # enable load balancer services
"spec": { pg_patch_enable_lbs = {
"enableMasterLoadBalancer": True, "spec": {
"enableReplicaLoadBalancer": True "enableMasterLoadBalancer": True,
"enableReplicaLoadBalancer": True
}
} }
} k8s.api.custom_objects_api.patch_namespaced_custom_object(
k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs) # wait for service recreation
# wait for service recreation time.sleep(60)
time.sleep(60)
master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
self.assertEqual(master_svc_type, 'LoadBalancer', self.assertEqual(master_svc_type, 'LoadBalancer',
"Expected LoadBalancer service type for master, found {}".format(master_svc_type)) "Expected LoadBalancer service type for master, found {}".format(master_svc_type))
repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
self.assertEqual(repl_svc_type, 'LoadBalancer', self.assertEqual(repl_svc_type, 'LoadBalancer',
"Expected LoadBalancer service type for replica, found {}".format(repl_svc_type)) "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type))
# disable load balancer services again # disable load balancer services again
pg_patch_disable_lbs = { pg_patch_disable_lbs = {
"spec": { "spec": {
"enableMasterLoadBalancer": False, "enableMasterLoadBalancer": False,
"enableReplicaLoadBalancer": False "enableReplicaLoadBalancer": False
}
} }
} k8s.api.custom_objects_api.patch_namespaced_custom_object(
k8s.api.custom_objects_api.patch_namespaced_custom_object( "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs) # wait for service recreation
# wait for service recreation time.sleep(60)
time.sleep(60)
master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master') master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
self.assertEqual(master_svc_type, 'ClusterIP', self.assertEqual(master_svc_type, 'ClusterIP',
"Expected ClusterIP service type for master, found {}".format(master_svc_type)) "Expected ClusterIP service type for master, found {}".format(master_svc_type))
repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica') repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
self.assertEqual(repl_svc_type, 'ClusterIP', self.assertEqual(repl_svc_type, 'ClusterIP',
"Expected ClusterIP service type for replica, found {}".format(repl_svc_type)) "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_lazy_spilo_upgrade(self): def test_lazy_spilo_upgrade(self):
@ -234,7 +239,8 @@ class EndToEndTestCase(unittest.TestCase):
# lazy update works if the restarted pod and older pods run different Spilo versions # lazy update works if the restarted pod and older pods run different Spilo versions
new_image = k8s.get_effective_pod_image(pod0) new_image = k8s.get_effective_pod_image(pod0)
old_image = k8s.get_effective_pod_image(pod1) old_image = k8s.get_effective_pod_image(pod1)
self.assertNotEqual(new_image, old_image, "Lazy updated failed: pods have the same image {}".format(new_image)) self.assertNotEqual(new_image, old_image,
"Lazy updated failed: pods have the same image {}".format(new_image))
# sanity check # sanity check
assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image) assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
@ -648,8 +654,8 @@ class EndToEndTestCase(unittest.TestCase):
get_config_cmd = "wget --quiet -O - localhost:8080/config" get_config_cmd = "wget --quiet -O - localhost:8080/config"
result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd) result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
roles_dict = (json.loads(result.stdout) roles_dict = (json.loads(result.stdout)
.get("controller", {}) .get("controller", {})
.get("InfrastructureRoles")) .get("InfrastructureRoles"))
self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict) self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
role = roles_dict["robot_zmon_acid_monitoring_new"] role = roles_dict["robot_zmon_acid_monitoring_new"]
@ -1001,8 +1007,8 @@ class K8s:
def exec_with_kubectl(self, pod, cmd): def exec_with_kubectl(self, pod, cmd):
return subprocess.run(["./exec.sh", pod, cmd], return subprocess.run(["./exec.sh", pod, cmd],
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
def get_effective_pod_image(self, pod_name, namespace='default'): def get_effective_pod_image(self, pod_name, namespace='default'):
''' '''