fix relevant flake8 violations
parent b379db20ed
commit e10b2e31da
--- a/e2e/tests/k8s_api.py
+++ b/e2e/tests/k8s_api.py
@@ -11,9 +11,11 @@ from datetime import datetime
 from kubernetes import client, config
 from kubernetes.client.rest import ApiException
 
+
 def to_selector(labels):
     return ",".join(["=".join(l) for l in labels.items()])
 
+
 class K8sApi:
 
     def __init__(self):
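
Note: the two added blank lines appear to fix flake8 E302 (expected 2 blank lines before a top-level definition). A minimal sketch of the rule, with hypothetical names:

    import os

    def helper():     # E302: only one blank line above a top-level def
        return os.sep


    def helper_ok():  # compliant: two blank lines above
        return os.curdir
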
@@ -184,7 +186,7 @@ class K8s:
 
     def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
-        return len(list(filter(lambda x: x.status.phase=='Running', pods)))
+        return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
 
     def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
         pod_phase = 'Failing over'
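
Note: the change above fixes flake8 E225 (missing whitespace around operator). A minimal illustration, variable names are hypothetical:

    phase = 'Running'
    ok = phase=='Running'    # E225: no whitespace around '=='
    ok = phase == 'Running'  # compliant
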
@@ -211,7 +213,6 @@ class K8s:
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
     def delete_operator_pod(self, step="Delete operator deplyment"):
-        operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
         self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
         self.wait_for_operator_pod_start()
 
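
Note: the removed line fixes flake8 F841 (local variable 'operator_pod' is assigned to but never used); the pod lookup had no other effect the method depended on. A sketch of the rule, all names hypothetical:

    def restart_operator(api):
        pod_name = api.find_pod()   # F841: 'pod_name' is never read
        api.trigger_restart()
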
@@ -241,7 +242,7 @@ class K8s:
 
     def get_operator_state(self):
         pod = self.get_operator_pod()
-        if pod == None:
+        if pod is None:
             return None
         pod = pod.metadata.name
 
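
Note: 'pod == None' becomes 'pod is None', fixing flake8 E711 (comparison to None should be 'is None'). The identity check is also safer, since '==' can be redefined by a custom __eq__:

    if pod == None:   # E711
        return None
    if pod is None:   # compliant
        return None
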
@@ -251,7 +252,6 @@ class K8s:
 
         return json.loads(r.stdout.decode())
 
-
     def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
         result = self.get_patroni_state(pod)
         return list(filter(lambda x: "State" in x and x["State"] == "running", result))
@@ -260,7 +260,7 @@ class K8s:
         try:
             deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace)
             return deployment.spec.replicas
-        except ApiException as e:
+        except ApiException:
             return None
 
     def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
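
Note: dropping 'as e' fixes another F841, because the bound exception object was never used. A sketch, with a hypothetical helper:

    try:
        replicas = read_deployment()   # hypothetical call
    except ApiException:               # bind 'as e' only when the object is used
        replicas = None
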
@@ -463,7 +463,6 @@ class K8sBase:
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
     def delete_operator_pod(self, step="Delete operator deplyment"):
-        operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
         self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
         self.wait_for_operator_pod_start()
 
--- a/e2e/tests/test_e2e.py
+++ b/e2e/tests/test_e2e.py
@@ -2,13 +2,11 @@ import json
 import unittest
 import time
 import timeout_decorator
-import subprocess
-import warnings
 import os
 import yaml
 
 from datetime import datetime
-from kubernetes import client, config
+from kubernetes import client
 
 from tests.k8s_api import K8s
 
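
Note: this hunk fixes flake8 F401 (imported but unused): 'subprocess' and 'warnings' go away entirely, and 'config' is dropped from the kubernetes import. For example:

    import subprocess                       # F401 if never referenced
    from kubernetes import client, config   # F401 on 'config' if only 'client' is used
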
@@ -170,9 +168,6 @@ class EndToEndTestCase(unittest.TestCase):
             'connection-pooler': 'acid-minimal-cluster-pooler',
         })
 
-        pod_selector = to_selector(pod_labels)
-        service_selector = to_selector(service_labels)
-
         # enable connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -823,14 +818,14 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_delete_annotations)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         try:
             # this delete attempt should be omitted because of missing annotations
             k8s.api.custom_objects_api.delete_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
             time.sleep(5)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # check that pods and services are still there
             k8s.wait_for_running_pods(cluster_label, 2)
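
Note: '{"0":"idle"}' becomes '{"0": "idle"}', fixing flake8 E231 (missing whitespace after ':'); the same fix repeats in the two hunks below:

    state = {"0":"idle"}    # E231
    state = {"0": "idle"}   # compliant
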
@@ -841,7 +836,7 @@ class EndToEndTestCase(unittest.TestCase):
 
             # wait a little before proceeding
             time.sleep(10)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # add annotations to manifest
             delete_date = datetime.today().strftime('%Y-%m-%d')
@@ -855,7 +850,7 @@ class EndToEndTestCase(unittest.TestCase):
             }
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # wait a little before proceeding
             time.sleep(20)
@@ -882,7 +877,7 @@ class EndToEndTestCase(unittest.TestCase):
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
 
-        #reset configmap
+        # reset configmap
         patch_delete_annotations = {
             "data": {
                 "delete_annotation_date_key": "",
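
Note: '#reset configmap' becomes '# reset configmap', fixing flake8 E265 (block comment should start with '# '):

    #reset configmap    (E265: no space after '#')
    # reset configmap   (compliant)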