use cluster-name as default label everywhere (#782)

* use cluster-name as default label everywhere
* fix e2e test
Felix Kunde, 2020-02-19 15:01:01 +01:00, committed by GitHub
parent 54796945f6
commit 742d7334a1
5 changed files with 16 additions and 16 deletions
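
With the new default, every label selector that previously keyed on `version=<cluster name>` now keys on `cluster-name=<cluster name>`, unless `cluster_name_label` is overridden in the operator configuration. A minimal sketch of looking up the master pod under the new default, assuming the official `kubernetes` Python client (the same API the e2e helpers below use) and a running `acid-minimal-cluster` in the `default` namespace:

```python
from kubernetes import client, config

# load credentials from the local kubeconfig
config.load_kube_config()
core_v1 = client.CoreV1Api()

# before this commit the middle term was version=acid-minimal-cluster
selector = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master'
pods = core_v1.list_namespaced_pod('default', label_selector=selector).items
print([pod.metadata.name for pod in pods])  # expect exactly one master pod
```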

@@ -55,7 +55,7 @@ configKubernetes:
   # additional labels assigned to the cluster objects
   cluster_labels: application:spilo
   # label assigned to Kubernetes objects created by the operator
-  cluster_name_label: version
+  cluster_name_label: cluster-name
   # annotations attached to each database pod
   # custom_pod_annotations: "keya:valuea,keyb:valueb"

@@ -65,7 +65,7 @@ our test cluster.
 ```bash
 # get name of master pod of acid-minimal-cluster
-export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,version=acid-minimal-cluster,spilo-role=master)
+export PGMASTER=$(kubectl get pods -o jsonpath={.items..metadata.name} -l application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master)
 # set up port forward
 kubectl port-forward $PGMASTER 6432:5432

@@ -65,7 +65,7 @@ class EndToEndTestCase(unittest.TestCase):
         '''
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'
         # enable load balancer services
         pg_patch_enable_lbs = {
@@ -113,7 +113,7 @@ class EndToEndTestCase(unittest.TestCase):
         Lower resource limits below configured minimum and let operator fix it
         '''
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'
         _, failover_targets = k8s.get_pg_nodes(cluster_label)
         # configure minimum boundaries for CPU and memory limits
@@ -172,7 +172,7 @@ class EndToEndTestCase(unittest.TestCase):
         k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
         k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-        self.assert_master_is_unique(self.namespace, version="acid-test-cluster")
+        self.assert_master_is_unique(self.namespace, "acid-test-cluster")
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_scaling(self):
@@ -180,7 +180,7 @@ class EndToEndTestCase(unittest.TestCase):
         Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
         '''
         k8s = self.k8s
-        labels = "version=acid-minimal-cluster"
+        labels = "cluster-name=acid-minimal-cluster"
         k8s.wait_for_pg_to_scale(3)
         self.assertEqual(3, k8s.count_pods_with_label(labels))
@@ -196,7 +196,7 @@ class EndToEndTestCase(unittest.TestCase):
         Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
         '''
         k8s = self.k8s
-        cluster_label = 'version=acid-minimal-cluster'
+        cluster_label = 'cluster-name=acid-minimal-cluster'
         # get nodes of master and replica(s) (expected target of new master)
         current_master_node, failover_targets = k8s.get_pg_nodes(cluster_label)
@@ -334,9 +334,9 @@ class EndToEndTestCase(unittest.TestCase):
             "foo": "bar",
         }
         self.assertTrue(k8s.check_service_annotations(
-            "version=acid-service-annotations,spilo-role=master", annotations))
+            "cluster-name=acid-service-annotations,spilo-role=master", annotations))
         self.assertTrue(k8s.check_service_annotations(
-            "version=acid-service-annotations,spilo-role=replica", annotations))
+            "cluster-name=acid-service-annotations,spilo-role=replica", annotations))
         # clean up
         unpatch_custom_service_annotations = {
@@ -346,14 +346,14 @@ class EndToEndTestCase(unittest.TestCase):
         }
         k8s.update_config(unpatch_custom_service_annotations)
-    def assert_master_is_unique(self, namespace='default', version="acid-minimal-cluster"):
+    def assert_master_is_unique(self, namespace='default', clusterName="acid-minimal-cluster"):
         '''
         Check that there is a single pod in the k8s cluster with the label "spilo-role=master"
         To be called manually after operations that affect pods
         '''
         k8s = self.k8s
-        labels = 'spilo-role=master,version=' + version
+        labels = 'spilo-role=master,cluster-name=' + clusterName
         num_of_master_pods = k8s.count_pods_with_label(labels, namespace)
         self.assertEqual(num_of_master_pods, 1, "Expected 1 master pod, found {}".format(num_of_master_pods))
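
`count_pods_with_label` is used throughout the assertions above, but its body is not part of this diff. A hypothetical sketch of such a helper on the `K8s` class, assuming `self.api.core_v1` is a `CoreV1Api` instance as in `wait_for_master_failover` below:

```python
def count_pods_with_label(self, labels, namespace='default'):
    # count pods matching a selector such as 'cluster-name=acid-minimal-cluster';
    # mirrors the list_namespaced_pod call used elsewhere in this K8s class
    pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
    return len(pods)
```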
@@ -438,7 +438,7 @@ class K8s:
         _ = self.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", namespace, "postgresqls", "acid-minimal-cluster", body)
-        labels = 'version=acid-minimal-cluster'
+        labels = 'cluster-name=acid-minimal-cluster'
         while self.count_pods_with_label(labels) != number_of_instances:
             time.sleep(self.RETRY_TIMEOUT_SEC)
@@ -448,7 +448,7 @@ class K8s:
     def wait_for_master_failover(self, expected_master_nodes, namespace='default'):
         pod_phase = 'Failing over'
         new_master_node = ''
-        labels = 'spilo-role=master,version=acid-minimal-cluster'
+        labels = 'spilo-role=master,cluster-name=acid-minimal-cluster'
         while (pod_phase != 'Running') or (new_master_node not in expected_master_nodes):
             pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items

@@ -10,7 +10,7 @@ data:
   cluster_domain: cluster.local
   cluster_history_entries: "1000"
   cluster_labels: application:spilo
-  cluster_name_label: version
+  cluster_name_label: cluster-name
   # custom_service_annotations: "keyx:valuez,keya:valuea"
   # custom_pod_annotations: "keya:valuea,keyb:valueb"
   db_hosted_zone: db.example.com

@@ -1498,8 +1498,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
 	)
 	labels := map[string]string{
-		"version":     c.Name,
-		"application": "spilo-logical-backup",
+		c.OpConfig.ClusterNameLabel: c.Name,
+		"application":               "spilo-logical-backup",
 	}
 	podAffinityTerm := v1.PodAffinityTerm{
 		LabelSelector: &metav1.LabelSelector{
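
After this change the logical backup pods carry the configured cluster name label (default `cluster-name`) next to `application=spilo-logical-backup`, instead of a hard-coded `version` key, so they can be selected per cluster. A small sketch under the same assumptions as the first example (official `kubernetes` Python client, default label name, an illustrative `acid-minimal-cluster`):

```python
from kubernetes import client, config

config.load_kube_config()
core_v1 = client.CoreV1Api()

# both labels are set by generateLogicalBackupJob above; 'cluster-name'
# is the operator default after this commit, not a hard-coded key
selector = 'application=spilo-logical-backup,cluster-name=acid-minimal-cluster'
for pod in core_v1.list_namespaced_pod('default', label_selector=selector).items:
    print(pod.metadata.name, pod.status.phase)
```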