use cluster-name as clusterNameLabel everywhere

Felix Kunde 2019-11-29 11:43:46 +01:00
parent 87677c62a5
commit f1cb6aa866
4 changed files with 22 additions and 22 deletions
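All four files move from the `version` label to `cluster-name` as the pod label identifying a Postgres cluster, matching the `cluster_name_label` operator option. A minimal sketch of the selector strings this convention implies (the `build_selector` helper is illustrative only, not part of this repository):

    CLUSTER_NAME_LABEL = "cluster-name"  # the old convention was "version"

    def build_selector(cluster, role=None):
        # e.g. "cluster-name=acid-minimal-cluster" or, with role="master",
        # "spilo-role=master,cluster-name=acid-minimal-cluster"
        selector = "{}={}".format(CLUSTER_NAME_LABEL, cluster)
        if role is not None:
            selector = "spilo-role={},{}".format(role, selector)
        return selector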

View File

@@ -10,9 +10,9 @@ from kubernetes import client, config
 class EndToEndTestCase(unittest.TestCase):
-    '''
+    """
     Test interaction of the operator with multiple K8s components.
-    '''
+    """
     # `kind` pods may stuck in the `Terminating` phase for a few minutes; hence high test timeout
     TEST_TIMEOUT_SEC = 600
@@ -20,14 +20,14 @@ class EndToEndTestCase(unittest.TestCase):
     @classmethod
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def setUpClass(cls):
-        '''
+        """
         Deploy operator to a "kind" cluster created by run.sh using examples from /manifests.
         This operator deployment is to be shared among all tests.
         run.sh deletes the 'kind' cluster after successful run along with all operator-related entities.
         In the case of test failure the cluster will stay to enable manual examination;
         next invocation of "make test" will re-create it.
-        '''
+        """
         # set a single K8s wrapper for all tests
         k8s = cls.k8s = K8s()
@@ -57,9 +57,9 @@ class EndToEndTestCase(unittest.TestCase):
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_multi_namespace_support(self):
-        '''
+        """
         Create a customized Postgres cluster in a non-default namespace.
-        '''
+        """
         k8s = self.k8s
         with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
@@ -69,16 +69,16 @@ class EndToEndTestCase(unittest.TestCase):
         k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
         k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-        self.assert_master_is_unique(self.namespace, version="acid-test-cluster")
+        self.assert_master_is_unique(self.namespace, clusterName="acid-test-cluster")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_scaling(self):
         """
         Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
         """
         k8s = self.k8s
-        labels = "version=acid-minimal-cluster"
+        labels = "cluster-name=acid-minimal-cluster"

         k8s.wait_for_pg_to_scale(3)
         self.assertEqual(3, k8s.count_pods_with_label(labels))
@@ -91,10 +91,10 @@ class EndToEndTestCase(unittest.TestCase):
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_taint_based_eviction(self):
         """
         Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
         """
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'

         # get nodes of master and replica(s) (expected target of new master)
         current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label)
@@ -212,14 +212,14 @@ class EndToEndTestCase(unittest.TestCase):
         self.assertEqual(0, len(jobs),
                          "Expected 0 logical backup jobs, found {}".format(len(jobs)))

-    def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"):
+    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
         """
         Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
         To be called manually after operations that affect pods
         """
         k8s = self.k8s
-        labels = 'spilo-role=master,version=' + version
+        labels = 'spilo-role=master,cluster-name=' + clusterName

         num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
         self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
@@ -242,9 +242,9 @@ class K8sApi:
 class K8s:
-    '''
+    """
     Wraps around K8 api client and helper methods.
-    '''
+    """

     RETRY_TIMEOUT_SEC = 5
@@ -287,7 +287,7 @@ class K8s:
         _ = self.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)

-        labels = 'version=acid-minimal-cluster'
+        labels = 'cluster-name=acid-minimal-cluster'
         while self.count_pods_with_label(labels) != number_of_instances:
             time.sleep(self.RETRY_TIMEOUT_SEC)
@@ -297,7 +297,7 @@ class K8s:
     def wait_for_master_failover(self, expected_master_nodes, namespace='default'):
         pod_phase = 'Failing over'
         new_master_node = ''
-        labels = 'spilo-role=master,version=acid-minimal-cluster'
+        labels = 'spilo-role=master,cluster-name=acid-minimal-cluster'
         while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes):
             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
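The selectors above are consumed by `count_pods_with_label`, which lives elsewhere in this test file and is outside the diff. A minimal sketch of such a helper, assuming the official `kubernetes` Python client imported at the top of the file (the real implementation may differ):

    from kubernetes import client, config

    def count_pods_with_label(labels, namespace='default'):
        # The e2e suite talks to a local "kind" cluster, so credentials
        # come from the local kubeconfig.
        config.load_kube_config()
        core_v1 = client.CoreV1Api()
        # Count pods matching e.g. "cluster-name=acid-minimal-cluster".
        return len(core_v1.list_namespaced_pod(namespace, label_selector=labels).items)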

View File

@@ -10,7 +10,7 @@ data:
   cluster_domain: cluster.local
   cluster_history_entries: "1000"
   cluster_labels: application:spilo
-  cluster_name_label: version
+  cluster_name_label: cluster-name
   # custom_service_annotations: "keyx:valuez,keya:valuea"
   # custom_pod_annotations: "keya:valuea,keyb:valueb"
   db_hosted_zone: db.example.com

View File

@@ -22,7 +22,7 @@ configuration:
   cluster_domain: cluster.local
   cluster_labels:
     application: spilo
-  cluster_name_label: version
+  cluster_name_label: cluster-name
   # custom_pod_annotations:
   #   keya: valuea
   #   keyb: valueb
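The ConfigMap and the OperatorConfiguration CRD are alternative formats for the same options, so both must now agree on the label name. A quick consistency check, assuming PyYAML and these manifest paths (the file names are assumptions; the diff does not show them):

    import yaml

    for path in ("manifests/configmap.yaml",
                 "manifests/postgresql-operator-default-configuration.yaml"):
        with open(path) as f:
            doc = yaml.safe_load(f)
        # The ConfigMap keeps options under .data, the CRD under .configuration.
        options = doc.get("data") or doc.get("configuration")
        assert options["cluster_name_label"] == "cluster-name", path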

View File

@@ -137,7 +137,7 @@ def read_pods(cluster, namespace, spilo_cluster):
         cluster=cluster,
         resource_type='pods',
         namespace=namespace,
-        label_selector={'version': spilo_cluster},
+        label_selector={'cluster-name': spilo_cluster},
     )
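Unlike the test suite, which builds selector strings by hand, the UI passes the selector as a dict. Converting between the two forms is straightforward (illustrative helper, not part of the UI code):

    def selector_to_string(label_selector):
        # {'cluster-name': 'acid-minimal-cluster'} -> 'cluster-name=acid-minimal-cluster'
        return ','.join('{}={}'.format(k, v) for k, v in sorted(label_selector.items()))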