merge master

Sergey Dudoladov 2020-08-31 16:02:29 +02:00
commit 999150f60e
8 changed files with 425 additions and 278 deletions

View File

@ -45,7 +45,7 @@ push: docker
tools:
# install pinned version of 'kind'
GO111MODULE=on go get sigs.k8s.io/kind@v0.8.1
e2etest: tools copy clean
./run.sh
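
Editor's note on the pin bump: the `kind get kubeconfig-path` subcommand that the e2e script relied on is no longer available by kind v0.8, so the pinned binary and the scripts below must move together. A minimal guard, as a sketch (the check_kind_version() helper is hypothetical, not part of this commit):

import subprocess

PINNED_KIND_VERSION = "v0.8.1"  # keep in sync with the Makefile pin above

def check_kind_version():
    # 'kind version' prints e.g. "kind v0.8.1 go1.14.2 linux/amd64"
    out = subprocess.run(["kind", "version"],
                         capture_output=True, text=True, check=True)
    if PINNED_KIND_VERSION not in out.stdout:
        raise RuntimeError("expected kind {}, got: {}".format(
            PINNED_KIND_VERSION, out.stdout.strip()))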

View File

@ -33,19 +33,17 @@ function start_kind(){
kind delete cluster --name ${cluster_name}
fi
export KUBECONFIG="${kubeconfig_path}"
kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
kind load docker-image "${operator_image}" --name ${cluster_name}
kind load docker-image "${e2e_test_image}" --name ${cluster_name}
}
function set_kind_api_server_ip(){
# use the actual kubeconfig to connect to the 'kind' API server
# but update the IP address of the API server to the one from the Docker 'kind' network
cp "${KUBECONFIG}" /tmp
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
}
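
For readers who prefer it over sed, the same kubeconfig rewrite sketched in Python. It assumes, as the shell version above does, kind v0.8's dedicated Docker network named 'kind' and the standard <cluster>-control-plane container name:

import re
import subprocess

def point_kubeconfig_at_kind(kubeconfig_path, cluster_name, port=6443):
    # ask Docker for the control-plane address on the 'kind' network
    fmt = "{{ .NetworkSettings.Networks.kind.IPAddress }}"
    ip = subprocess.run(
        ["docker", "inspect", "--format", fmt, cluster_name + "-control-plane"],
        capture_output=True, text=True, check=True).stdout.strip()

    with open(kubeconfig_path) as f:
        cfg = f.read()
    # swap the server line for the address reachable from other containers
    cfg = re.sub(r"server:.*", "server: https://{}:{}".format(ip, port), cfg)
    with open(kubeconfig_path, "w") as f:
        f.write(cfg)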

View File

@ -38,6 +38,9 @@ class EndToEndTestCase(unittest.TestCase):
# set a single K8s wrapper for all tests
k8s = cls.k8s = K8s()
# remove existing local storage class and create hostpath class
k8s.api.storage_v1_api.delete_storage_class("standard")
# operator deploys pod service account there on start up
# needed for test_multi_namespace_support()
cls.namespace = "test"
@ -54,7 +57,8 @@ class EndToEndTestCase(unittest.TestCase):
"configmap.yaml",
"postgres-operator.yaml",
"infrastructure-roles.yaml",
"infrastructure-roles-new.yaml"]:
"infrastructure-roles-new.yaml",
"e2e-storage-class.yaml"]:
result = k8s.create_with_kubectl("manifests/" + filename)
print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
@ -159,45 +163,96 @@ class EndToEndTestCase(unittest.TestCase):
    k8s = self.k8s
    cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'

    try:
        # enable load balancer services
        pg_patch_enable_lbs = {
            "spec": {
                "enableMasterLoadBalancer": True,
                "enableReplicaLoadBalancer": True
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
        # wait for service recreation
        time.sleep(60)

        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
        self.assertEqual(master_svc_type, 'LoadBalancer',
                         "Expected LoadBalancer service type for master, found {}".format(master_svc_type))

        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
        self.assertEqual(repl_svc_type, 'LoadBalancer',
                         "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type))

        # disable load balancer services again
        pg_patch_disable_lbs = {
            "spec": {
                "enableMasterLoadBalancer": False,
                "enableReplicaLoadBalancer": False
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
        # wait for service recreation
        time.sleep(60)

        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
        self.assertEqual(master_svc_type, 'ClusterIP',
                         "Expected ClusterIP service type for master, found {}".format(master_svc_type))

        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
        self.assertEqual(repl_svc_type, 'ClusterIP',
                         "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_infrastructure_roles(self):
    '''
    Test using external secrets for infrastructure roles
    '''
    k8s = self.k8s
    # update infrastructure roles description
    secret_name = "postgresql-infrastructure-roles"
    roles = "secretname: postgresql-infrastructure-roles-new, userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon"
    patch_infrastructure_roles = {
        "data": {
            "infrastructure_roles_secret_name": secret_name,
            "infrastructure_roles_secrets": roles,
        },
    }
    k8s.update_config(patch_infrastructure_roles)

    # wait a little before proceeding
    time.sleep(30)

    try:
        # check that new roles are represented in the config by requesting the
        # operator configuration via API
        operator_pod = k8s.get_operator_pod()
        get_config_cmd = "wget --quiet -O - localhost:8080/config"
        result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
        roles_dict = (json.loads(result.stdout)
                      .get("controller", {})
                      .get("InfrastructureRoles"))

        self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
        role = roles_dict["robot_zmon_acid_monitoring_new"]
        role.pop("Password", None)
        self.assertDictEqual(role, {
            "Name": "robot_zmon_acid_monitoring_new",
            "Flags": None,
            "MemberOf": ["robot_zmon"],
            "Parameters": None,
            "AdminRole": "",
            "Origin": 2,
        })
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
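
Editor's note: every test body in this file now repeats the same try/except wrapper to dump the operator log on timeout. One way to factor the pattern out, as a sketch (the context manager below is hypothetical, not part of this commit):

from contextlib import contextmanager

import timeout_decorator

@contextmanager
def operator_log_on_timeout(k8s):
    try:
        yield
    except timeout_decorator.TimeoutError:
        # print the log, then re-raise so the test still fails
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise

A test body would then read "with operator_log_on_timeout(k8s): ..." instead of carrying its own except clause.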
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_lazy_spilo_upgrade(self):
@ -226,38 +281,44 @@ class EndToEndTestCase(unittest.TestCase):
    pod0 = 'acid-minimal-cluster-0'
    pod1 = 'acid-minimal-cluster-1'

    try:
        # restart the pod to get a container with the new image
        k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
        time.sleep(60)

        # lazy update works if the restarted pod and older pods run different Spilo versions
        new_image = k8s.get_effective_pod_image(pod0)
        old_image = k8s.get_effective_pod_image(pod1)
        self.assertNotEqual(new_image, old_image,
                            "Lazy update failed: pods have the same image {}".format(new_image))

        # sanity check
        assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
        self.assertEqual(new_image, conf_image, assert_msg)

        # clean up
        unpatch_lazy_spilo_upgrade = {
            "data": {
                "enable_lazy_spilo_upgrade": "false",
            }
        }
        k8s.update_config(unpatch_lazy_spilo_upgrade)

        # at this point the operator will complete the normal rolling upgrade,
        # so we additionally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
        # XXX there is no easy way to wait until the end of Sync()
        time.sleep(60)

        image0 = k8s.get_effective_pod_image(pod0)
        image1 = k8s.get_effective_pod_image(pod1)

        assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1)
        self.assertEqual(image0, image1, assert_msg)
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
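
The fixed time.sleep(60) above is acknowledged as a workaround ("no easy way to wait until the end of Sync()"). A polling alternative, sketched with a hypothetical helper, would return as soon as both pods report the same image:

import time

def wait_for_images_to_converge(k8s, pod_names, timeout=120, interval=5):
    images = set()
    deadline = time.time() + timeout
    while time.time() < deadline:
        images = {k8s.get_effective_pod_image(p) for p in pod_names}
        if len(images) == 1:
            return images.pop()  # the common image
        time.sleep(interval)
    raise TimeoutError("pod images did not converge: {}".format(images))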
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_logical_backup_cron_job(self):
@ -283,45 +344,51 @@ class EndToEndTestCase(unittest.TestCase):
    }
    k8s.api.custom_objects_api.patch_namespaced_custom_object(
        "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup)

    try:
        k8s.wait_for_logical_backup_job_creation()

        jobs = k8s.get_logical_backup_job().items
        self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs)))

        job = jobs[0]
        self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster",
                         "Expected job name {}, found {}"
                         .format("logical-backup-acid-minimal-cluster", job.metadata.name))
        self.assertEqual(job.spec.schedule, schedule,
                         "Expected {} schedule, found {}"
                         .format(schedule, job.spec.schedule))

        # update the cluster-wide image of the logical backup pod
        image = "test-image-name"
        patch_logical_backup_image = {
            "data": {
                "logical_backup_docker_image": image,
            }
        }
        k8s.update_config(patch_logical_backup_image)

        jobs = k8s.get_logical_backup_job().items
        actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image
        self.assertEqual(actual_image, image,
                         "Expected job image {}, found {}".format(image, actual_image))

        # delete the logical backup cron job
        pg_patch_disable_backup = {
            "spec": {
                "enableLogicalBackup": False,
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)

        k8s.wait_for_logical_backup_job_deletion()
        jobs = k8s.get_logical_backup_job().items
        self.assertEqual(0, len(jobs),
                         "Expected 0 logical backup jobs, found {}".format(len(jobs)))
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
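
The nested attribute chain used above to reach the CronJob image is easy to get wrong; a small helper (hypothetical, not in this commit) would make the two image lookups read the same way:

def logical_backup_job_image(k8s):
    jobs = k8s.get_logical_backup_job().items
    assert len(jobs) == 1, "expected exactly one logical backup job"
    # CronJob -> JobTemplate -> PodTemplate -> first container
    return jobs[0].spec.job_template.spec.template.spec.containers[0].image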
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_min_resource_limits(self):
@ -361,20 +428,26 @@ class EndToEndTestCase(unittest.TestCase):
    }
    k8s.api.custom_objects_api.patch_namespaced_custom_object(
        "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)

    try:
        k8s.wait_for_pod_failover(failover_targets, labels)
        k8s.wait_for_pod_start('spilo-role=replica')

        pods = k8s.api.core_v1.list_namespaced_pod(
            'default', label_selector=labels).items
        self.assert_master_is_unique()
        masterPod = pods[0]

        self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
                         "Expected CPU limit {}, found {}"
                         .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
        self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
                         "Expected memory limit {}, found {}"
                         .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
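
Note that masterPod = pods[0] above relies on list ordering even after assert_master_is_unique() has confirmed there is exactly one master. Selecting by the spilo-role label directly would be more robust; a sketch with a hypothetical helper:

def get_master_pod(k8s, namespace='default'):
    pods = k8s.api.core_v1.list_namespaced_pod(
        namespace,
        label_selector='application=spilo,spilo-role=master').items
    assert len(pods) == 1, "expected exactly one master pod"
    return pods[0]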
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_multi_namespace_support(self):
@ -388,9 +461,14 @@ class EndToEndTestCase(unittest.TestCase):
pg_manifest["metadata"]["namespace"] = self.namespace
yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
self.assert_master_is_unique(self.namespace, "acid-test-cluster")
try:
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
self.assert_master_is_unique(self.namespace, "acid-test-cluster")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_node_readiness_label(self):
@ -402,40 +480,45 @@ class EndToEndTestCase(unittest.TestCase):
    readiness_label = 'lifecycle-status'
    readiness_value = 'ready'

    try:
        # get nodes of master and replica(s) (expected target of new master)
        current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
        num_replicas = len(current_replica_nodes)
        failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)

        # add node_readiness_label to potential failover nodes
        patch_readiness_label = {
            "metadata": {
                "labels": {
                    readiness_label: readiness_value
                }
            }
        }
        for failover_target in failover_targets:
            k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)

        # define node_readiness_label in config map which should trigger a failover of the master
        patch_readiness_label_config = {
            "data": {
                "node_readiness_label": readiness_label + ':' + readiness_value,
            }
        }
        k8s.update_config(patch_readiness_label_config)
        new_master_node, new_replica_nodes = self.assert_failover(
            current_master_node, num_replicas, failover_targets, cluster_label)

        # patch also node where master ran before
        k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)

        # wait a little before proceeding with the pod distribution test
        time.sleep(30)

        # toggle pod anti affinity to move replica away from master node
        self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
@ -445,13 +528,18 @@ class EndToEndTestCase(unittest.TestCase):
    k8s = self.k8s
    labels = "application=spilo,cluster-name=acid-minimal-cluster"

    try:
        k8s.wait_for_pg_to_scale(3)
        self.assertEqual(3, k8s.count_pods_with_label(labels))
        self.assert_master_is_unique()

        k8s.wait_for_pg_to_scale(2)
        self.assertEqual(2, k8s.count_pods_with_label(labels))
        self.assert_master_is_unique()
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_service_annotations(self):
@ -466,27 +554,32 @@ class EndToEndTestCase(unittest.TestCase):
    }
    k8s.update_config(patch_custom_service_annotations)

    try:
        pg_patch_custom_annotations = {
            "spec": {
                "serviceAnnotations": {
                    "annotation.key": "value",
                    "foo": "bar",
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)

        # wait a little before proceeding
        time.sleep(30)
        annotations = {
            "annotation.key": "value",
            "foo": "bar",
        }
        self.assertTrue(k8s.check_service_annotations(
            "cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
        self.assertTrue(k8s.check_service_annotations(
            "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise

    # clean up
    unpatch_custom_service_annotations = {
@ -511,24 +604,29 @@ class EndToEndTestCase(unittest.TestCase):
    }
    k8s.update_config(patch_sset_propagate_annotations)

    try:
        pg_crd_annotations = {
            "metadata": {
                "annotations": {
                    "deployment-time": "2020-04-30 12:00:00",
                    "downscaler/downtime_replicas": "0",
                },
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)

        # wait a little before proceeding
        time.sleep(60)
        annotations = {
            "deployment-time": "2020-04-30 12:00:00",
            "downscaler/downtime_replicas": "0",
        }
        self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_taint_based_eviction(self):
@ -555,65 +653,29 @@ class EndToEndTestCase(unittest.TestCase):
        }
    }

    try:
        # patch node and test if master is failing over to one of the expected nodes
        k8s.api.core_v1.patch_node(current_master_node, body)
        new_master_node, new_replica_nodes = self.assert_failover(
            current_master_node, num_replicas, failover_targets, cluster_label)

        # add toleration to pods
        patch_toleration_config = {
            "data": {
                "toleration": "key:postgres,operator:Exists,effect:NoExecute"
            }
        }
        k8s.update_config(patch_toleration_config)

        # wait a little before proceeding with the pod distribution test
        time.sleep(30)

        # toggle pod anti affinity to move replica away from master node
        self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_x_cluster_deletion(self):
@ -632,65 +694,86 @@ class EndToEndTestCase(unittest.TestCase):
    }
    k8s.update_config(patch_delete_annotations)

    try:
        # this delete attempt should be omitted because of missing annotations
        k8s.api.custom_objects_api.delete_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")

        # check that pods and services are still there
        k8s.wait_for_running_pods(cluster_label, 2)
        k8s.wait_for_service(cluster_label)

        # recreate Postgres cluster resource
        k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")

        # wait a little before proceeding
        time.sleep(10)

        # add annotations to manifest
        deleteDate = datetime.today().strftime('%Y-%m-%d')
        pg_patch_delete_annotations = {
            "metadata": {
                "annotations": {
                    "delete-date": deleteDate,
                    "delete-clustername": "acid-minimal-cluster",
                }
            }
        }
        k8s.api.custom_objects_api.patch_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)

        # wait a little before proceeding
        time.sleep(10)
        k8s.wait_for_running_pods(cluster_label, 2)
        k8s.wait_for_service(cluster_label)

        # now delete process should be triggered
        k8s.api.custom_objects_api.delete_namespaced_custom_object(
            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")

        # wait until cluster is deleted
        time.sleep(120)

        # check if everything has been deleted
        self.assertEqual(0, k8s.count_pods_with_label(cluster_label))
        self.assertEqual(0, k8s.count_services_with_label(cluster_label))
        self.assertEqual(0, k8s.count_endpoints_with_label(cluster_label))
        self.assertEqual(0, k8s.count_statefulsets_with_label(cluster_label))
        self.assertEqual(0, k8s.count_deployments_with_label(cluster_label))
        self.assertEqual(0, k8s.count_pdbs_with_label(cluster_label))
        self.assertEqual(0, k8s.count_secrets_with_label(cluster_label))
    except timeout_decorator.TimeoutError:
        print('Operator log: {}'.format(k8s.get_operator_log()))
        raise
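
The delete protection exercised here requires both annotations to match before the operator honours the delete: the date key must carry today's date and the cluster-name key must name the cluster being deleted. Building a compliant patch, as a sketch using the same annotation keys configured above:

from datetime import datetime

def delete_protection_patch(cluster_name):
    # both annotations must be present and correct for deletion to proceed
    return {
        "metadata": {
            "annotations": {
                "delete-date": datetime.today().strftime('%Y-%m-%d'),
                "delete-clustername": cluster_name,
            }
        }
    }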
def get_failover_targets(self, master_node, replica_nodes):
    '''
    If all pods live on the same node, failover will happen to other worker(s)
    '''
    k8s = self.k8s
    k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'

    failover_targets = [x for x in replica_nodes if x != master_node]
    if len(failover_targets) == 0:
        nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
        for n in nodes.items:
            if n.metadata.name != master_node:
                failover_targets.append(n.metadata.name)

    return failover_targets
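
The new label selector hardcodes the e2e cluster name to exclude the control-plane node. A variant that filters on the node-role label instead, close to the condition this commit removed, would survive a cluster rename (sketch, assuming the node-role.kubernetes.io/master label set by this Kubernetes version):

def worker_node_names(k8s):
    nodes = k8s.api.core_v1.list_node().items
    return [n.metadata.name for n in nodes
            if "node-role.kubernetes.io/master" not in (n.metadata.labels or {})]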
@ -738,8 +821,7 @@ class EndToEndTestCase(unittest.TestCase):
        }
    }
    k8s.update_config(patch_enable_antiaffinity)
    self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)

    # now disable pod anti affinity again which will cause yet another failover
    patch_disable_antiaffinity = {
@ -767,6 +849,7 @@ class K8sApi:
self.batch_v1_beta1 = client.BatchV1beta1Api()
self.custom_objects_api = client.CustomObjectsApi()
self.policy_v1_beta1 = client.PolicyV1beta1Api()
self.storage_v1_api = client.StorageV1Api()
class K8s:
@ -944,8 +1027,8 @@ class K8s:
def exec_with_kubectl(self, pod, cmd):
    return subprocess.run(["./exec.sh", pod, cmd],
                          stdout=subprocess.PIPE,
                          stderr=subprocess.PIPE)
def get_effective_pod_image(self, pod_name, namespace='default'):
'''

go.mod
View File

@ -3,16 +3,21 @@ module github.com/zalando/postgres-operator
go 1.14
require (
github.com/aws/aws-sdk-go v1.34.10
github.com/lib/pq v1.8.0
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
github.com/r3labs/diff v1.1.0
github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.5.1
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
gopkg.in/yaml.v2 v2.2.8
k8s.io/api v0.18.8
k8s.io/apiextensions-apiserver v0.18.0
k8s.io/apimachinery v0.18.8
k8s.io/client-go v0.18.6
k8s.io/code-generator v0.18.8
)

go.sum
View File

@ -26,8 +26,8 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8=
github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.34.1 h1:jM0mJ9JSJyhujwxBNYKrNB8Iwp8N7J2WsQxTR4yPSck=
github.com/aws/aws-sdk-go v1.34.1/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/aws/aws-sdk-go v1.34.10 h1:VU78gcf/3wA4HNEDCHidK738l7K0Bals4SJnfnvXOtY=
github.com/aws/aws-sdk-go v1.34.10/go.mod h1:5zCpMtNQVjRREroY7sYe8lOMRSxkhG6MZveU8YkpAk0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
@ -68,8 +68,13 @@ github.com/emicklei/go-restful v2.9.6+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v0.0.0-20200808040245-162e5629780b/go.mod h1:NAJj0yf/KaRKURN6nyi7A9IZydMivZEm9oQLWNjfKDc=
github.com/evanphx/json-patch v4.2.0+incompatible h1:fUDGZCv/7iAN7u0puUVhvKCcsR6vRfwrJatElLBEf0I=
github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
@ -413,8 +418,13 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06 h1:ChBCbOHeLqK+j+znGPlWCcvx/t2PdxmyPBheVZxXbcc=
golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
@ -460,6 +470,7 @@ honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4=
k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.8 h1:lgO1P1wjikEtzNvj7ia+x1VC4svJ28a/r0wnOLhhOTU=
k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
k8s.io/component-base v0.18.0/go.mod h1:u3BCg0z1uskkzrnAKFzulmYaEpZF7XC9Pf/uFyb1v2c=
k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120 h1:RPscN6KhmG54S33L+lr3GS+oD1jmchIU0ll519K6FA4=
k8s.io/gengo v0.0.0-20200114144118-36b2048a9120/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
@ -486,7 +518,11 @@ k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v0.3.3/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk=
k8s.io/klog v1.0.0 h1:Pt+yjF5aB1xDSVbau4VsWe+dQNzA0qv1LlXdC2dF6Q8=
k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I=
k8s.io/kube-openapi v0.0.0-20200121204235-bf4fb3bd569c/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6 h1:Oh3Mzx5pJ+yIumsAD0MOECPVeXsVot0UkiaCGVyfGQY=
k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E=
k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89 h1:d4vVOjXm687F1iLSP2q3lyPPuyvTUt3aVoBpi2DqRsU=

View File

@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  namespace: kube-system
  name: standard
  annotations:
    storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/host-path
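
In setUpClass above, the same swap happens programmatically: the pre-installed 'standard' class is deleted and this manifest is applied. For reference, an equivalent sketch using the kubernetes Python client (the same StorageV1Api this commit wires into K8sApi):

from kubernetes import client, config

config.load_kube_config()
storage_api = client.StorageV1Api()

# drop kind's default class so the hostpath class becomes the default
storage_api.delete_storage_class("standard")

storage_api.create_storage_class(client.V1StorageClass(
    metadata=client.V1ObjectMeta(
        name="standard",
        annotations={"storageclass.kubernetes.io/is-default-class": "true"}),
    provisioner="kubernetes.io/host-path"))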

View File

@ -53,7 +53,7 @@ type PostgresSpec struct {
NumberOfInstances int32 `json:"numberOfInstances"`
Users map[string]UserFlags `json:"users"`
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
Clone *CloneDescription `json:"clone"`
Clone *CloneDescription `json:"clone,omitempty"`
ClusterName string `json:"-"`
Databases map[string]string `json:"databases,omitempty"`
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
@ -64,10 +64,10 @@ type PostgresSpec struct {
ShmVolume *bool `json:"enableShmVolume,omitempty"`
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
StandbyCluster *StandbyDescription `json:"standby"`
PodAnnotations map[string]string `json:"podAnnotations"`
ServiceAnnotations map[string]string `json:"serviceAnnotations"`
TLS *TLSDescription `json:"tls"`
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
TLS *TLSDescription `json:"tls,omitempty"`
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
// deprecated json tags
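
The added omitempty tags stop unset optional sections from being serialized back into cluster manifests as explicit nulls. The effect, illustrated with a sketch (plain Python dicts standing in for the marshalled spec):

import json

# without omitempty, unset optional fields serialize as nulls
spec = {"numberOfInstances": 2, "clone": None, "standby": None, "tls": None}
print(json.dumps(spec))  # {"numberOfInstances": 2, "clone": null, ...}

# with omitempty, they are dropped entirely
compact = {k: v for k, v in spec.items() if v is not None}
print(json.dumps(compact))  # {"numberOfInstances": 2}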

View File

@ -0,0 +1,7 @@
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- deployment.yaml
- ingress.yaml
- service.yaml
- ui-service-account-rbac.yaml