fix relevant flake8 violations

Sergey Dudoladov 2020-11-09 08:16:17 +01:00
parent b379db20ed
commit e10b2e31da
2 changed files with 17 additions and 23 deletions


@@ -11,9 +11,11 @@ from datetime import datetime
from kubernetes import client, config
from kubernetes.client.rest import ApiException
def to_selector(labels):
return ",".join(["=".join(l) for l in labels.items()])
class K8sApi:
def __init__(self):
@@ -181,10 +183,10 @@ class K8s:
def count_pdbs_with_label(self, labels, namespace='default'):
return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget(
namespace, label_selector=labels).items)
def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: x.status.phase=='Running', pods)))
return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over'
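
Note: the phase comparison above is an E225 fix (missing whitespace around operator). A minimal sketch of the rule, with an illustrative value:

# E225: binary operators such as == need a space on both sides.
phase = 'Running'              # illustrative value
running = phase=='Running'     # flagged by flake8 as E225
running = phase == 'Running'   # compliant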
@@ -211,7 +213,6 @@ class K8s:
self.wait_for_logical_backup_job(expected_num_of_jobs=1)
def delete_operator_pod(self, step="Delete operator deployment"):
operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
self.wait_for_operator_pod_start()
@@ -241,7 +242,7 @@ class K8s:
def get_operator_state(self):
pod = self.get_operator_pod()
if pod == None:
if pod is None:
return None
pod = pod.metadata.name
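
Note: the `== None` change is an E711 fix (comparison to None should be `if cond is None:`). A short sketch of why the identity test is preferred:

# E711: None is a singleton, so test identity rather than equality.
pod = None          # illustrative value
if pod == None:     # flagged by flake8 as E711; also invokes __eq__
    pass
if pod is None:     # compliant; immune to custom __eq__ overrides
    pass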
@@ -251,7 +252,6 @@ class K8s:
return json.loads(r.stdout.decode())
def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
result = self.get_patroni_state(pod)
return list(filter(lambda x: "State" in x and x["State"] == "running", result))
@@ -260,9 +260,9 @@ class K8s:
try:
deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace)
return deployment.spec.replicas
except ApiException as e:
except ApiException:
return None
def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
ssets = self.api.apps_v1.list_namespaced_stateful_set(namespace, label_selector=label_selector, limit=1)
if len(ssets.items) == 0:
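
Note: dropping `as e` above fixes F841 (local variable is assigned to but never used); the exception object was bound but never read. A self-contained sketch with hypothetical function names:

# F841: an exception bound to a name that is never read is flagged.
def get_replicas_before():
    try:
        raise RuntimeError("API unavailable")  # stand-in for a failing API call
    except RuntimeError as e:                  # flagged by flake8 as F841
        return None

def get_replicas_after():
    try:
        raise RuntimeError("API unavailable")
    except RuntimeError:                       # compliant: no unused binding
        return None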
@@ -463,7 +463,6 @@ class K8sBase:
self.wait_for_logical_backup_job(expected_num_of_jobs=1)
def delete_operator_pod(self, step="Delete operator deployment"):
operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
self.wait_for_operator_pod_start()
@@ -521,7 +520,7 @@ class K8sOperator(K8sBase):
class K8sPostgres(K8sBase):
def __init__(self, labels="cluster-name=acid-minimal-cluster", namespace="default"):
super().__init__(labels, namespace)
def get_pg_nodes(self):
master_pod_node = ''
replica_pod_nodes = []
@@ -532,4 +531,4 @@ class K8sPostgres(K8sBase):
elif pod.metadata.labels.get('spilo-role') == 'replica':
replica_pod_nodes.append(pod.spec.node_name)
return master_pod_node, replica_pod_nodes
return master_pod_node, replica_pod_nodes


@@ -2,13 +2,11 @@ import json
import unittest
import time
import timeout_decorator
import subprocess
import warnings
import os
import yaml
from datetime import datetime
from kubernetes import client, config
from kubernetes import client
from tests.k8s_api import K8s
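
Note: the import change is an F401 fix (`config` imported but unused in this module). A sketch, assuming the kubernetes Python client is installed:

from kubernetes import client, config   # config never referenced -> flagged as F401
from kubernetes import client           # compliant once the unused name is dropped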
@@ -170,9 +168,6 @@ class EndToEndTestCase(unittest.TestCase):
'connection-pooler': 'acid-minimal-cluster-pooler',
})
pod_selector = to_selector(pod_labels)
service_selector = to_selector(service_labels)
# enable connection pooler
k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',
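
Note: the deleted selector lines fix another F841 case, locals computed but never read. A minimal sketch reusing the `to_selector` helper from the first file; the wrapper function and its argument are illustrative:

def to_selector(labels):
    # same helper as above: {'a': 'b'} -> 'a=b'
    return ",".join(["=".join(pair) for pair in labels.items()])

def enable_pooler_step(pod_labels):
    pod_selector = to_selector(pod_labels)  # flagged by flake8 as F841 if never read
    return True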
@@ -746,12 +741,12 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
annotations = {
"deployment-time": "2020-04-30 12:00:00",
"downscaler/downtime_replicas": "0",
}
self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
@@ -823,14 +818,14 @@ class EndToEndTestCase(unittest.TestCase):
}
}
k8s.update_config(patch_delete_annotations)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
try:
# this delete attempt should be omitted because of missing annotations
k8s.api.custom_objects_api.delete_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
time.sleep(5)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# check that pods and services are still there
k8s.wait_for_running_pods(cluster_label, 2)
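
Note: the repeated {"0":"idle"} edits in this file are E231 fixes (missing whitespace after ':'). A one-line illustration:

# E231: a colon in a dict literal needs a trailing space.
state = {"0":"idle"}    # flagged by flake8 as E231
state = {"0": "idle"}   # compliant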
@@ -841,7 +836,7 @@ class EndToEndTestCase(unittest.TestCase):
# wait a little before proceeding
time.sleep(10)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# add annotations to manifest
delete_date = datetime.today().strftime('%Y-%m-%d')
@@ -855,7 +850,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# wait a little before proceeding
time.sleep(20)
@@ -882,7 +877,7 @@ class EndToEndTestCase(unittest.TestCase):
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
#reset configmap
# reset configmap
patch_delete_annotations = {
"data": {
"delete_annotation_date_key": "",