deploy/delete DB for each test

parent 02c2d488bc
commit b63a11ea31
@@ -11,11 +11,6 @@ readonly cluster_name="kind-test-postgres-operator"

# avoid interference with previous test runs
if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]]
then
  # true if variable is set; bash >= v4.2
  if [[ -v KUBECONFIG ]];then
    rm "$KUBECONFIG"
    unset KUBECONFIG
  fi
  kind delete cluster --name ${cluster_name}
fi
@@ -23,4 +18,5 @@ kind create cluster --name ${cluster_name} --config ./e2e/kind-config-multikind.

export KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})"
kubectl cluster-info

-python3 -m unittest discover --start-directory e2e/tests/
+python3 -m unittest discover --start-directory e2e/tests/ &&
+kind delete cluster --name ${cluster_name}
@@ -8,16 +8,20 @@ class SampleTestCase(unittest.TestCase):

    nodes = set(["kind-test-postgres-operator-worker", "kind-test-postgres-operator-worker2", "kind-test-postgres-operator-worker3"])

    config = None

    @classmethod
    def setUpClass(cls):

        # deploy operator
        '''
        Deploy operator to a "kind" cluster created by /e2e/run.sh using examples from /manifests.
        This operator deployment is to be shared among all tests of this suite.
        '''

        _ = config.load_kube_config()
        k8s_client = client.ApiClient()

        # TODO split into multiple files
        # HACK
        # 1. creating RBAC entities with a separate client fails with
        #    "AttributeError: object has no attribute 'select_header_accept'"
        # 2. utils.create_from_yaml cannot create multiple entities from a single file
        subprocess.run(["kubectl", "create", "-f", "manifests/operator-service-account-rbac.yaml"])

        for filename in ["configmap.yaml", "postgres-operator.yaml"]:
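
The TODO and HACK notes above point toward utils.create_from_yaml from the kubernetes Python client. For the single-document manifests, a minimal sketch of that substitution (an illustration only, not part of this commit) could look like:

from kubernetes import utils

# Sketch only: deploy the single-document manifests with the Python client
# instead of shelling out to kubectl; k8s_client is the ApiClient created above.
for filename in ["configmap.yaml", "postgres-operator.yaml"]:
    utils.create_from_yaml(k8s_client, "manifests/" + filename)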

@@ -26,7 +30,6 @@ class SampleTestCase(unittest.TestCase):

        v1 = client.CoreV1Api()
        pod_phase = None

        while pod_phase != 'Running':
            pods = v1.list_namespaced_pod('default', label_selector='name=postgres-operator').items
            if pods:
@@ -37,13 +40,34 @@ class SampleTestCase(unittest.TestCase):

    @classmethod
    def tearDownClass(cls):
        apps_v1 = client.AppsV1Api()
        _ = apps_v1.delete_namespaced_deployment("postgres-operator", "default")
        '''
        /e2e/run.sh deletes the 'kind' cluster after a successful run along with all operator-related entities.
        In the case of a test failure the cluster will stay to enable manual examination;
        the next invocation of "make e2e" will re-create it.
        '''
        pass

    def setUp(self):
        '''
        Deploy a new Postgres DB for each test.
        '''
        self.config = config.load_kube_config()
        self.v1 = client.CoreV1Api()

        k8s_client = client.ApiClient()

        # TODO substitute with utils.create_from_yaml and a Python client for acid.zalan.do
        subprocess.run(["kubectl", "create", "-f", "manifests/minimal-postgres-manifest.yaml"])

        pod_phase = None
        while pod_phase != 'Running':
            pods = self.v1.list_namespaced_pod('default', label_selector='spilo-role=master').items
            if pods:
                operator_pod = pods[0]
                pod_phase = operator_pod.status.phase
                print("Waiting for the Spilo master pod to start. Current phase: " + pod_phase)
            time.sleep(5)
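
The TODO at the top of setUp could be served by the CustomObjectsApi; a minimal sketch, assuming the minimal manifest parses as a single YAML document (an illustration only, not part of this commit):

import yaml
from kubernetes import client

# Sketch only: create the postgresql custom resource through the API
# instead of shelling out to kubectl.
with open("manifests/minimal-postgres-manifest.yaml") as f:
    pg_manifest = yaml.safe_load(f)

crd = client.CustomObjectsApi()
crd.create_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", pg_manifest)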

    def test_assign_labels_to_nodes(self):
        """
        Ensure labeling nodes through the externally connected Python client works.

@@ -69,18 +93,22 @@ class SampleTestCase(unittest.TestCase):

    def tearDown(self):
        """
        Each test must restore the original cluster state
        to avoid introducing dependencies between tests
        Delete the database to avoid introducing dependencies between tests
        """
        body = {
            "metadata": {
                "labels": {
                    "lifecycle-status": None  # deletes label
                }
            }
        }
        for node in self.nodes:
            _ = self.v1.patch_node(node, body)
        # HACK workaround for #551
        time.sleep(60)

        _ = config.load_kube_config()
        crd = client.CustomObjectsApi()
        body = client.V1DeleteOptions()
        _ = crd.delete_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", body)

        # wait for the pods to be deleted
        pods = self.v1.list_namespaced_pod('default', label_selector='spilo-role=master').items
        while pods:
            pods = self.v1.list_namespaced_pod('default', label_selector='spilo-role=master').items
            print("Waiting for the database to be deleted.")
            time.sleep(5)
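
The poll-and-sleep pattern above also appears in setUpClass and setUp; a hypothetical helper could centralize it. This is a sketch only, not part of this commit, and the name wait_for_pods is made up here:

import time

def wait_for_pods(v1, label_selector, phase=None, present=True, namespace='default', interval=5):
    # Poll until pods matching the selector exist (optionally in the given phase)
    # when present=True, or until no matching pods remain when present=False.
    while True:
        pods = v1.list_namespaced_pod(namespace, label_selector=label_selector).items
        if phase is not None:
            pods = [p for p in pods if p.status.phase == phase]
        if bool(pods) == present:
            return pods
        time.sleep(interval)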

if __name__ == '__main__':
    unittest.main()