Move long-running test to the end. Move pooler test to new functions.

Jan Mußler 2020-10-26 21:38:06 +01:00
parent 8b057d4e43
commit 5294995b19
1 changed file with 37 additions and 54 deletions


@@ -152,7 +152,6 @@ class EndToEndTestCase(unittest.TestCase):
         pod_selector = to_selector(pod_labels)
         service_selector = to_selector(service_labels)
 
-        try:
         # enable connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -162,24 +161,9 @@ class EndToEndTestCase(unittest.TestCase):
                     'enableConnectionPooler': True,
                 }
             })
 
-        k8s.wait_for_pod_start(pod_selector)
-
-        pods = k8s.api.core_v1.list_namespaced_pod(
-            'default', label_selector=pod_selector
-        ).items
-        self.assertTrue(pods, 'No connection pooler pods')
-
-        k8s.wait_for_service(service_selector)
-        services = k8s.api.core_v1.list_namespaced_service(
-            'default', label_selector=service_selector
-        ).items
-        services = [
-            s for s in services
-            if s.metadata.name.endswith('pooler')
-        ]
-        self.assertTrue(services, 'No connection pooler service')
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 2, "No pooler pods found")
+        self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 1, "No pooler service found")
 
         # scale up connection pooler deployment
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -188,12 +172,12 @@ class EndToEndTestCase(unittest.TestCase):
             {
                 'spec': {
                     'connectionPooler': {
-                        'numberOfInstances': 2,
+                        'numberOfInstances': 3,
                     },
                 }
             })
 
-        k8s.wait_for_running_pods(pod_selector, 2)
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 3, "Scale up of pooler pods does not work")
 
         # turn it off, keeping configuration section
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
@@ -202,13 +186,12 @@ class EndToEndTestCase(unittest.TestCase):
             {
                 'spec': {
                     'enableConnectionPooler': False,
+                    'connectionPooler': None
                 }
             })
-        k8s.wait_for_pods_to_stop(pod_selector)
 
-        except timeout_decorator.TimeoutError:
-            print('Operator log: {}'.format(k8s.get_operator_log()))
-            raise
+        self.eventuallyEqual(lambda: k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"), 0, "Pooler pods not scaled down")
+        self.eventuallyEqual(lambda: k8s.count_services_with_label('application=db-connection-pooler,cluster-name=acid-minimal-cluster'), 0, "Pooler service not removed")
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_enable_load_balancer(self):
@@ -542,7 +525,7 @@ class EndToEndTestCase(unittest.TestCase):
             raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_node_readiness_label(self):
+    def test_zzz_node_readiness_label(self):
        '''
        Remove node readiness label from master node. This must cause a failover.
        '''
@@ -676,7 +659,7 @@ class EndToEndTestCase(unittest.TestCase):
             "downscaler/downtime_replicas": "0",
         }
 
-        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations))
+        self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
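
The replacement assertions lean on the harness's polling helpers (self.eventuallyEqual, k8s.count_running_pods, k8s.count_services_with_label) instead of fixed waits wrapped in try/except, so each check retries until the cluster converges or the test times out. A minimal sketch of that retry pattern, using hypothetical names and timeout values rather than the operator's actual e2e utilities:

    import time

    def eventually_equal(get_value, expected, message, timeout=120, interval=2):
        # Poll get_value() until it returns the expected result; fail with
        # the given message if the deadline passes first.
        deadline = time.time() + timeout
        while True:
            value = get_value()
            if value == expected:
                return
            if time.time() >= deadline:
                raise AssertionError(f"{message}: expected {expected}, got {value}")
            time.sleep(interval)

    # usage mirroring the diff above (count_pooler_pods is a stand-in helper):
    # eventually_equal(lambda: count_pooler_pods(), 2, "No pooler pods found")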