update kind and use with old storage class (#1121)

* update kind and use with old storage class
* specify standard storage class in minimal manifest
* remove existing local storage class in kind
* fix pod distribution test
* exclude k8s master from nodes of interest
Felix Kunde 2020-08-28 12:16:37 +02:00 committed by GitHub
parent 248ce9fc78
commit 30c86758a3
4 changed files with 26 additions and 16 deletions


@@ -42,7 +42,7 @@ push: docker
tools: docker
# install pinned version of 'kind'
- GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1
+ GO111MODULE=on go get sigs.k8s.io/kind@v0.8.1
e2etest:
./run.sh


@@ -35,25 +35,22 @@ function start_kind(){
kind delete cluster --name ${cluster_name}
fi
+ export KUBECONFIG="${kubeconfig_path}"
kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
kind load docker-image "${operator_image}" --name ${cluster_name}
kind load docker-image "${e2e_test_image}" --name ${cluster_name}
KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})"
export KUBECONFIG
}
function set_kind_api_server_ip(){
# use the actual kubeconfig to connect to the 'kind' API server
# but update the IP address of the API server to the one from the Docker 'bridge' network
cp "${KUBECONFIG}" /tmp
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
- readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
+ readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
}
function run_tests(){
- docker run --rm --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}"
+ docker run --rm --network kind --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}"
}
function clean_up(){

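The run.sh changes reflect two kind v0.8 behaviours: clusters now attach to a dedicated Docker network named "kind" (so the test container joins it via --network kind), and `kind get kubeconfig-path` no longer exists, so the script exports KUBECONFIG itself and rewrites the kubeconfig's server: entry to the control-plane container's address on that network. Below is a rough Python equivalent of set_kind_api_server_ip, for illustration only: the helper names are made up, while the docker inspect template, the <cluster>-control-plane container name, and port 6443 come from the script above.

import re
import subprocess

def kind_api_server_address(cluster_name: str, api_port: int = 6443) -> str:
    # resolve the control-plane container's IP on the Docker 'kind' network
    fmt = "{{ .NetworkSettings.Networks.kind.IPAddress }}"
    ip = subprocess.run(
        ["docker", "inspect", "--format", fmt, f"{cluster_name}-control-plane"],
        capture_output=True, text=True, check=True,
    ).stdout.strip()
    return f"{ip}:{api_port}"

def patch_kubeconfig(kubeconfig_path: str, api_server: str) -> None:
    # point the 'server:' entry at the kind-network address, like the sed call above
    with open(kubeconfig_path) as f:
        cfg = f.read()
    cfg = re.sub(r"server.*$", f"server: https://{api_server}", cfg, flags=re.MULTILINE)
    with open(kubeconfig_path, "w") as f:
        f.write(cfg)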

@@ -38,6 +38,9 @@ class EndToEndTestCase(unittest.TestCase):
# set a single K8s wrapper for all tests
k8s = cls.k8s = K8s()
+ # remove existing local storage class and create hostpath class
+ k8s.api.storage_v1_api.delete_storage_class("standard")
# operator deploys pod service account there on start up
# needed for test_multi_namespace_support()
cls.namespace = "test"
@@ -54,7 +57,8 @@ class EndToEndTestCase(unittest.TestCase):
"configmap.yaml",
"postgres-operator.yaml",
"infrastructure-roles.yaml",
"infrastructure-roles-new.yaml"]:
"infrastructure-roles-new.yaml",
"e2e-storage-class.yaml"]:
result = k8s.create_with_kubectl("manifests/" + filename)
print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
@@ -600,8 +604,8 @@ class EndToEndTestCase(unittest.TestCase):
get_config_cmd = "wget --quiet -O - localhost:8080/config"
result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
roles_dict = (json.loads(result.stdout)
.get("controller", {})
.get("InfrastructureRoles"))
.get("controller", {})
.get("InfrastructureRoles"))
self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
role = roles_dict["robot_zmon_acid_monitoring_new"]
@@ -685,12 +689,13 @@ class EndToEndTestCase(unittest.TestCase):
If all pods live on the same node, failover will happen to other worker(s)
'''
k8s = self.k8s
+ k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'
failover_targets = [x for x in replica_nodes if x != master_node]
if len(failover_targets) == 0:
- nodes = k8s.api.core_v1.list_node()
+ nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
for n in nodes.items:
if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node:
if n.metadata.name != master_node:
failover_targets.append(n.metadata.name)
return failover_targets
@@ -738,8 +743,7 @@ class EndToEndTestCase(unittest.TestCase):
}
}
k8s.update_config(patch_enable_antiaffinity)
- self.assert_failover(
- master_node, len(replica_nodes), failover_targets, cluster_label)
+ self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)
# now disable pod anti affintiy again which will cause yet another failover
patch_disable_antiaffinity = {
@@ -767,6 +771,7 @@ class K8sApi:
self.batch_v1_beta1 = client.BatchV1beta1Api()
self.custom_objects_api = client.CustomObjectsApi()
self.policy_v1_beta1 = client.PolicyV1beta1Api()
+ self.storage_v1_api = client.StorageV1Api()
class K8s:
@@ -944,8 +949,8 @@ class K8s:
def exec_with_kubectl(self, pod, cmd):
return subprocess.run(["./exec.sh", pod, cmd],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE)
def get_effective_pod_image(self, pod_name, namespace='default'):
'''

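The pod distribution fix moves the node filtering server-side: rather than listing every node and checking the node-role.kubernetes.io/master label in Python, list_node is called with a label selector that excludes the kind control-plane node by its kubernetes.io/hostname label. A condensed sketch of that selection, assuming a reachable cluster; the standalone function name is illustrative, and the hostname matches the kind cluster name used by these tests.

from kubernetes import client, config

def get_failover_targets(master_node: str) -> list:
    # exclude the control-plane node by hostname, mirroring k8s_master_exclusion above
    config.load_kube_config()
    core_v1 = client.CoreV1Api()
    selector = "kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane"
    nodes = core_v1.list_node(label_selector=selector)
    return [n.metadata.name for n in nodes.items if n.metadata.name != master_node]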

@@ -0,0 +1,8 @@
+ apiVersion: storage.k8s.io/v1
+ kind: StorageClass
+ metadata:
+   namespace: kube-system
+   name: standard
+   annotations:
+     storageclass.kubernetes.io/is-default-class: "true"
+ provisioner: kubernetes.io/host-path
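Together, the setUpClass change and this new manifest swap out the storage class kind pre-installs: delete_storage_class("standard") removes the existing local class, and applying e2e-storage-class.yaml re-creates "standard" as the default host-path class. Purely as an illustration (the test applies the YAML via kubectl rather than the client), the same swap could be expressed with the Python client:

from kubernetes import client, config

config.load_kube_config()  # assumes KUBECONFIG points at the kind test cluster
storage_v1 = client.StorageV1Api()

# drop the existing local storage class that kind pre-installs ...
storage_v1.delete_storage_class("standard")

# ... and re-create "standard" as a default host-path class, matching the manifest above
hostpath_class = client.V1StorageClass(
    metadata=client.V1ObjectMeta(
        name="standard",
        annotations={"storageclass.kubernetes.io/is-default-class": "true"},
    ),
    provisioner="kubernetes.io/host-path",
)
storage_v1.create_storage_class(hostpath_class)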