Update e2e pipeline (#1202)
* clean up after test_multi_namespace test
* see the PR description for the complete list of changes

Co-authored-by: Sergey Dudoladov <sergey.dudoladov@zalando.de>
parent b379db20ed
commit e779eab22f
@@ -56,7 +56,19 @@ NOCLEANUP=True ./run.sh main tests.test_e2e.EndToEndTestCase.test_lazy_spilo_upg
 
 ## Inspecting Kind
 
-If you want to inspect Kind/Kubernetes cluster, use the following script to exec into the K8s setup and then use `kubectl`
+If you want to inspect the Kind/Kubernetes cluster, switch the `kubeconfig` file and context
+```bash
+# save the old config in case you have it
+export KUBECONFIG_SAVED=$KUBECONFIG
+
+# use the one created by e2e tests
+export KUBECONFIG=/tmp/kind-config-postgres-operator-e2e-tests
+
+# this kubeconfig defines a single context
+kubectl config use-context kind-postgres-operator-e2e-tests
+```
+
+or use the following script to exec into the K8s setup and then use `kubectl`
 
 ```bash
 ./exec_into_env.sh
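The kubeconfig and context above can also be used from Python, which is handy when poking at the kind cluster with the same `kubernetes` client the tests use. A minimal sketch, assuming the kubeconfig written by `run.sh` exists at the path shown above; everything besides that path and context name is illustrative and not part of this patch:

```python
# Sketch: point the Python kubernetes client at the e2e kind cluster.
from kubernetes import client, config

# Path and context name come from the README snippet above; the rest is assumed.
config.load_kube_config(
    config_file="/tmp/kind-config-postgres-operator-e2e-tests",
    context="kind-postgres-operator-e2e-tests",
)

core_v1 = client.CoreV1Api()
for pod in core_v1.list_namespaced_pod("default").items:
    print(pod.metadata.name, pod.status.phase)
```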
@@ -71,6 +83,14 @@ kubectl get pods
 ./scripts/get_logs.sh
 ```
 
+If you want to inspect the state of the `kind` cluster manually with a single command, add a `context` flag
+
+```bash
+kubectl get pods --context kind-kind
+```
+
+or set the context for a few commands at once
+
 ## Cleaning up Kind
 
 To cleanup kind and start fresh
@@ -79,6 +99,12 @@ To cleanup kind and start fresh
 e2e/run.sh cleanup
 ```
 
+That also helps in case you see the
+```
+ERROR: no nodes found for cluster "postgres-operator-e2e-tests"
+```
+that happens when the `kind` cluster was deleted manually but its configuration file was not.
+
 ## Covered use cases
 
 The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py):
@@ -11,9 +11,11 @@ from datetime import datetime
 from kubernetes import client, config
 from kubernetes.client.rest import ApiException
 
+
+
 def to_selector(labels):
     return ",".join(["=".join(l) for l in labels.items()])
 
 
 class K8sApi:
     def __init__(self):
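For reference, `to_selector` simply flattens a label dict into the comma-separated string that `label_selector` arguments expect. A quick illustrative check (the labels below are made up for the example, not taken from the patch):

```python
def to_selector(labels):
    # same helper as in the hunk above: {"a": "b", "c": "d"} -> "a=b,c=d"
    return ",".join(["=".join(l) for l in labels.items()])

print(to_selector({"application": "spilo", "cluster-name": "acid-minimal-cluster"}))
# application=spilo,cluster-name=acid-minimal-cluster
```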
@@ -210,9 +212,9 @@ class K8s:
     def wait_for_logical_backup_job_creation(self):
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
-    def delete_operator_pod(self, step="Delete operator deplyment"):
-        operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
-        self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
+    def delete_operator_pod(self, step="Delete operator pod"):
+        # patching the pod template in the deployment restarts the operator pod
+        self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, datetime.fromtimestamp(time.time()))}}}}})
         self.wait_for_operator_pod_start()
 
     def update_config(self, config_map_patch, step="Updating operator deployment"):
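The reworked `delete_operator_pod` no longer looks up the pod at all; it bumps an annotation on the deployment's pod template so the Deployment controller rolls the operator pod, then waits for the new one. A standalone sketch of that pattern, assuming a reachable cluster (the deployment name and namespace come from the hunk above, the rest is illustration):

```python
import time
from datetime import datetime

from kubernetes import client, config

config.load_kube_config()  # assumes a kubeconfig pointing at the test cluster
apps_v1 = client.AppsV1Api()

# Changing anything under spec.template.metadata triggers a rolling restart,
# which is how the helper "deletes" the operator pod.
step = "Delete operator pod"
annotation = {"step": "{}-{}".format(step, datetime.fromtimestamp(time.time()))}
body = {"spec": {"template": {"metadata": {"annotations": annotation}}}}
apps_v1.patch_namespaced_deployment("postgres-operator", "default", body)
```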
@@ -241,7 +243,7 @@ class K8s:
 
     def get_operator_state(self):
         pod = self.get_operator_pod()
-        if pod == None:
+        if pod is None:
             return None
         pod = pod.metadata.name
 
@@ -251,7 +253,6 @@ class K8s:
 
         return json.loads(r.stdout.decode())
 
-
     def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
         result = self.get_patroni_state(pod)
         return list(filter(lambda x: "State" in x and x["State"] == "running", result))
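`get_patroni_running_members` filters the parsed `patronictl` output for members in the `running` state. With a made-up two-member list (purely illustrative, not from the patch) the filter behaves like this:

```python
# Hypothetical Patroni member list, shaped like the JSON the helper parses.
patroni_state = [
    {"Member": "acid-minimal-cluster-0", "Role": "Leader", "State": "running"},
    {"Member": "acid-minimal-cluster-1", "Role": "Replica", "State": "starting"},
]

running = list(filter(lambda x: "State" in x and x["State"] == "running", patroni_state))
print(len(running))  # 1, only the first member counts as running here
```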
@@ -260,7 +261,7 @@ class K8s:
         try:
             deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace)
             return deployment.spec.replicas
-        except ApiException as e:
+        except ApiException:
             return None
 
     def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
@@ -463,7 +464,6 @@ class K8sBase:
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
     def delete_operator_pod(self, step="Delete operator deplyment"):
-        operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
         self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
         self.wait_for_operator_pod_start()
 
@@ -2,15 +2,14 @@ import json
 import unittest
 import time
 import timeout_decorator
-import subprocess
-import warnings
 import os
 import yaml
 
 from datetime import datetime
-from kubernetes import client, config
+from kubernetes import client
 
 from tests.k8s_api import K8s
+from kubernetes.client.rest import ApiException
 
 SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
 SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114"
@@ -89,17 +88,17 @@ class EndToEndTestCase(unittest.TestCase):
         # remove existing local storage class and create hostpath class
         try:
             k8s.api.storage_v1_api.delete_storage_class("standard")
-        except:
-            print("Storage class has already been remove")
+        except ApiException as e:
+            print("Failed to delete the 'standard' storage class: {0}".format(e))
 
         # operator deploys pod service account there on start up
         # needed for test_multi_namespace_support()
-        cls.namespace = "test"
+        cls.test_namespace = "test"
         try:
-            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace))
+            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace))
             k8s.api.core_v1.create_namespace(v1_namespace)
-        except:
-            print("Namespace already present")
+        except ApiException as e:
+            print("Failed to create the '{0}' namespace: {1}".format(cls.test_namespace, e))
 
         # submit the most recent operator image built on the Docker host
         with open("manifests/postgres-operator.yaml", 'r+') as f:
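Narrowing the bare `except:` clauses to `ApiException` means setup failures are reported instead of silently swallowed, while an already-existing namespace still does not abort the suite. A self-contained sketch of the same idempotent-setup pattern (the namespace name comes from the hunk above, the rest is assumed for illustration):

```python
from kubernetes import client, config
from kubernetes.client.rest import ApiException

config.load_kube_config()  # assumes a reachable cluster
core_v1 = client.CoreV1Api()

test_namespace = "test"
try:
    ns = client.V1Namespace(metadata=client.V1ObjectMeta(name=test_namespace))
    core_v1.create_namespace(ns)
except ApiException as e:
    # e.g. 409 Conflict when the namespace is left over from a previous run
    print("Failed to create the '{0}' namespace: {1}".format(test_namespace, e))
```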
@@ -137,8 +136,6 @@ class EndToEndTestCase(unittest.TestCase):
         # this tackles the problem when kind is reused
         # and the Docker image is in fact changed (dirty one)
 
-        # patch resync period, this can catch some problems with hanging e2e tests
-        # k8s.update_config({"data": {"resync_period":"30s"}},step="TestSuite setup")
         k8s.update_config({}, step="TestSuite Startup")
 
         actual_operator_image = k8s.api.core_v1.list_namespaced_pod(
@@ -170,9 +167,6 @@ class EndToEndTestCase(unittest.TestCase):
             'connection-pooler': 'acid-minimal-cluster-pooler',
         })
 
-        pod_selector = to_selector(pod_labels)
-        service_selector = to_selector(service_labels)
-
         # enable connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
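The unused `pod_selector`/`service_selector` assignments are dropped; the test then enables the pooler by patching the `postgresql` custom resource. A hedged sketch of such a patch outside the test harness (group, version, namespace, plural and cluster name come from this diff; `enableConnectionPooler` is the manifest field the test presumably toggles):

```python
from kubernetes import client, config

config.load_kube_config()  # assumes a reachable cluster
custom_objects = client.CustomObjectsApi()

# Toggle the connection pooler on the minimal test cluster.
custom_objects.patch_namespaced_custom_object(
    "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
    {"spec": {"enableConnectionPooler": True}},
)
```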
@@ -604,17 +598,25 @@ class EndToEndTestCase(unittest.TestCase):
 
         with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
             pg_manifest = yaml.safe_load(f)
-            pg_manifest["metadata"]["namespace"] = self.namespace
+            pg_manifest["metadata"]["namespace"] = self.test_namespace
             yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
 
         try:
             k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
-            k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-            self.assert_master_is_unique(self.namespace, "acid-test-cluster")
+            k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
+            self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
+        finally:
+            # delete the new cluster so that the k8s_api.get_operator_state works correctly in subsequent tests
+            # ideally we should delete the 'test' namespace here but
+            # the pods inside the namespace stuck in the Terminating state making the test time out
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
+            time.sleep(5)
+
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_node_readiness_label(self):
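The new `finally` block removes the cluster created in the `test` namespace so that `k8s_api.get_operator_state` keeps working for later tests; deleting the whole namespace is avoided because its pods can hang in Terminating. If a fixed `time.sleep(5)` ever proves too optimistic, a polling helper along these lines could wait for the pods to actually disappear (illustrative only, not part of the patch):

```python
import time

def wait_for_pods_gone(core_v1, namespace, label_selector, timeout=120, interval=5):
    # Poll until no pods matching the selector remain in the namespace.
    deadline = time.time() + timeout
    while time.time() < deadline:
        pods = core_v1.list_namespaced_pod(namespace, label_selector=label_selector).items
        if not pods:
            return
        time.sleep(interval)
    raise TimeoutError("pods matching {} still present in {}".format(label_selector, namespace))

# hypothetical usage: wait_for_pods_gone(k8s.api.core_v1, "test", "cluster-name=acid-test-cluster")
```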
@@ -626,14 +626,14 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 		}
 	}()
 
-	if oldSpec.Spec.PostgresqlParam.PgVersion >= newSpec.Spec.PostgresqlParam.PgVersion {
+	if oldSpec.Spec.PostgresqlParam.PgVersion > newSpec.Spec.PostgresqlParam.PgVersion {
 		c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
 			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
 		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect",
 			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
 		// we need that hack to generate statefulset with the old version
 		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
-	} else {
+	} else if oldSpec.Spec.PostgresqlParam.PgVersion < newSpec.Spec.PostgresqlParam.PgVersion {
 		c.logger.Infof("postgresql version increased (%q -> %q), major version upgrade can be done manually after StatefulSet Sync",
 			oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
 	}