print operator log in most tests when they time out

Felix Kunde 2020-08-13 16:49:35 +02:00
parent 0d81f972a1
commit 767f58f37e
1 changed file with 222 additions and 170 deletions

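Summary of the change: each timeout-decorated test now wraps its body in a try/except that catches timeout_decorator.TimeoutError, prints the operator log via k8s.get_operator_log(), and re-raises so the test still fails. A minimal sketch of the pattern, assuming it sits inside EndToEndTestCase (the method name test_example is illustrative, not part of this diff):

    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
    def test_example(self):
        k8s = self.k8s
        try:
            # the original test steps run unchanged inside the try block
            k8s.wait_for_pod_start('spilo-role=master')
        except timeout_decorator.TimeoutError:
            # on timeout, dump the operator log for debugging, then fail as before
            print('Operator log: {}'.format(k8s.get_operator_log()))
            raise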

@@ -225,6 +225,7 @@ class EndToEndTestCase(unittest.TestCase):
pod0 = 'acid-minimal-cluster-0'
pod1 = 'acid-minimal-cluster-1'
try:
# restart the pod to get a container with the new image
k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
time.sleep(60)
@@ -258,6 +259,10 @@
assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1)
self.assertEqual(image0, image1, assert_msg)
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_logical_backup_cron_job(self):
'''
@@ -282,6 +287,8 @@
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup)
try:
k8s.wait_for_logical_backup_job_creation()
jobs = k8s.get_logical_backup_job().items
@@ -322,6 +329,10 @@
self.assertEqual(0, len(jobs),
"Expected 0 logical backup jobs, found {}".format(len(jobs)))
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_min_resource_limits(self):
'''
@@ -360,6 +371,8 @@
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
try:
k8s.wait_for_pod_failover(failover_targets, labels)
k8s.wait_for_pod_start('spilo-role=replica')
@@ -375,6 +388,10 @@
"Expected memory limit {}, found {}"
.format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_multi_namespace_support(self):
'''
@@ -387,10 +404,15 @@
pg_manifest["metadata"]["namespace"] = self.namespace
yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
try:
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
self.assert_master_is_unique(self.namespace, "acid-test-cluster")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_node_readiness_label(self):
'''
@@ -401,6 +423,7 @@
readiness_label = 'lifecycle-status'
readiness_value = 'ready'
try:
# get nodes of master and replica(s) (expected target of new master)
current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
num_replicas = len(current_replica_nodes)
@@ -436,6 +459,10 @@
# toggle pod anti affinity to move replica away from master node
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
'''
@@ -444,6 +471,7 @@
k8s = self.k8s
labels = "application=spilo,cluster-name=acid-minimal-cluster"
try:
k8s.wait_for_pg_to_scale(3)
self.assertEqual(3, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()
@@ -452,6 +480,10 @@
self.assertEqual(2, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_service_annotations(self):
'''
@@ -465,6 +497,7 @@
}
k8s.update_config(patch_custom_service_annotations)
try:
pg_patch_custom_annotations = {
"spec": {
"serviceAnnotations": {
@@ -487,6 +520,10 @@
self.assertTrue(k8s.check_service_annotations(
"cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
# clean up
unpatch_custom_service_annotations = {
"data": {
@@ -510,6 +547,7 @@
}
k8s.update_config(patch_sset_propagate_annotations)
try:
pg_crd_annotations = {
"metadata": {
"annotations": {
@@ -529,6 +567,10 @@
}
self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_taint_based_eviction(self):
'''
@@ -554,6 +596,7 @@
}
}
try:
# patch node and test if master is failing over to one of the expected nodes
k8s.api.core_v1.patch_node(current_master_node, body)
new_master_node, new_replica_nodes = self.assert_failover(
@@ -573,6 +616,10 @@
# toggle pod anti affinity to move replica away from master node
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_infrastructure_roles(self):
'''
@@ -593,6 +640,7 @@
# wait a little before proceeding
time.sleep(30)
try:
# check that new roles are represented in the config by requesting the
# operator configuration via API
operator_pod = k8s.get_operator_pod()
@@ -614,6 +662,10 @@
"Origin": 2,
})
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
def get_failover_targets(self, master_node, replica_nodes):
'''
If all pods live on the same node, failover will happen to other worker(s)