merge master
commit c9dc72a124
@@ -26,22 +26,22 @@ spec:
     - name: Image
       type: string
      description: Spilo image to be used for Pods
-      JSONPath: .configuration.docker_image
+      jsonPath: .configuration.docker_image
    - name: Cluster-Label
      type: string
      description: Label for K8s resources created by operator
-      JSONPath: .configuration.kubernetes.cluster_name_label
+      jsonPath: .configuration.kubernetes.cluster_name_label
    - name: Service-Account
      type: string
      description: Name of service account to be used
-      JSONPath: .configuration.kubernetes.pod_service_account_name
+      jsonPath: .configuration.kubernetes.pod_service_account_name
    - name: Min-Instances
      type: integer
      description: Minimum number of instances per Postgres cluster
-      JSONPath: .configuration.min_instances
+      jsonPath: .configuration.min_instances
    - name: Age
      type: date
-      JSONPath: .metadata.creationTimestamp
+      jsonPath: .metadata.creationTimestamp
  schema:
    openAPIV3Schema:
      type: object
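Note on the `JSONPath` → `jsonPath` renames in this and the later CRD hunks: the capitalized key is the `apiextensions.k8s.io/v1beta1` spelling of the printer-column path, while `apiextensions.k8s.io/v1` expects lowerCamelCase `jsonPath`. A minimal Go sketch of the difference, using trimmed-down, renamed copies of the apiextensions column type so both generations compile in one file:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down stand-ins for CustomResourceColumnDefinition; only the
// JSON tag of the path field differs between the two API generations.
type columnV1beta1 struct {
	Name     string `json:"name"`
	Type     string `json:"type"`
	JSONPath string `json:"JSONPath"` // v1beta1: capitalized key
}

type columnV1 struct {
	Name     string `json:"name"`
	Type     string `json:"type"`
	JSONPath string `json:"jsonPath"` // v1: lowerCamelCase key
}

func main() {
	old, _ := json.Marshal(columnV1beta1{"Image", "string", ".configuration.docker_image"})
	cur, _ := json.Marshal(columnV1{"Image", "string", ".configuration.docker_image"})
	fmt.Println(string(old)) // ...,"JSONPath":".configuration.docker_image"}
	fmt.Println(string(cur)) // ...,"jsonPath":".configuration.docker_image"}
}
```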
@@ -49,15 +49,15 @@ spec:
            - kind
            - apiVersion
            - configuration
          properties:
            kind:
              type: string
              enum:
                - OperatorConfiguration
            apiVersion:
              type: string
              enum:
                - acid.zalan.do/v1
            configuration:
              type: object
              properties:
@@ -26,34 +26,34 @@ spec:
    - name: Team
      type: string
      description: Team responsible for Postgres CLuster
-      JSONPath: .spec.teamId
+      jsonPath: .spec.teamId
    - name: Version
      type: string
      description: PostgreSQL version
-      JSONPath: .spec.postgresql.version
+      jsonPath: .spec.postgresql.version
    - name: Pods
      type: integer
      description: Number of Pods per Postgres cluster
-      JSONPath: .spec.numberOfInstances
+      jsonPath: .spec.numberOfInstances
    - name: Volume
      type: string
      description: Size of the bound volume
-      JSONPath: .spec.volume.size
+      jsonPath: .spec.volume.size
    - name: CPU-Request
      type: string
      description: Requested CPU for Postgres containers
-      JSONPath: .spec.resources.requests.cpu
+      jsonPath: .spec.resources.requests.cpu
    - name: Memory-Request
      type: string
      description: Requested memory for Postgres containers
-      JSONPath: .spec.resources.requests.memory
+      jsonPath: .spec.resources.requests.memory
    - name: Age
      type: date
-      JSONPath: .metadata.creationTimestamp
+      jsonPath: .metadata.creationTimestamp
    - name: Status
      type: string
      description: Current sync status of postgresql resource
-      JSONPath: .status.PostgresClusterStatus
+      jsonPath: .status.PostgresClusterStatus
  schema:
    openAPIV3Schema:
      type: object
@@ -105,6 +105,10 @@ rules:
   - delete
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
+  - patch
+  - update
+{{- end }}
 # to read existing PVs. Creation should be done via dynamic provisioning
 - apiGroups:
   - ""
@@ -113,7 +117,9 @@ rules:
   verbs:
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "ebs" }}
   - update # only for resizing AWS volumes
+{{- end }}
 # to watch Spilo pods and do rolling updates. Creation via StatefulSet
 - apiGroups:
   - ""
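The two template guards above gate the extra RBAC verbs on `storage_resize_mode`. Helm templates are Go templates, so the pipeline semantics can be checked with plain `text/template`; in particular, `eq` receives the piped value as its last argument. A small sketch, with a hand-rolled `toString` standing in for the Sprig helper Helm provides and the hypothetical field name `mode`:

```go
package main

import (
	"fmt"
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// stand-in for Sprig's toString
		"toString": func(v interface{}) string { return fmt.Sprintf("%v", v) },
	}
	const rbac = `{{- if toString .mode | eq "pvc" }}
  - patch
  - update
{{- end }}
`
	// the pipeline evaluates as: eq "pvc" (toString .mode)
	tpl := template.Must(template.New("rbac").Funcs(funcs).Parse(rbac))
	if err := tpl.Execute(os.Stdout, map[string]interface{}{"mode": "pvc"}); err != nil {
		panic(err)
	}
}
```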
@@ -136,7 +136,7 @@ configKubernetes:
   # whether the Spilo container should run in privileged mode
   spilo_privileged: false
   # storage resize strategy, available options are: ebs, pvc, off
-  storage_resize_mode: ebs
+  storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*"  # listen to all namespaces
@@ -130,7 +130,7 @@ configKubernetes:
   # whether the Spilo container should run in privileged mode
   spilo_privileged: "false"
   # storage resize strategy, available options are: ebs, pvc, off
-  storage_resize_mode: ebs
+  storage_resize_mode: pvc
   # operator watches for postgres objects in the given namespace
   watched_namespace: "*"  # listen to all namespaces
@@ -56,12 +56,24 @@ NOCLEANUP=True ./run.sh main tests.test_e2e.EndToEndTestCase.test_lazy_spilo_upg
 
 ## Inspecting Kind
 
-If you want to inspect Kind/Kubernetes cluster, use the following script to exec into the K8s setup and then use `kubectl`
+If you want to inspect the Kind/Kubernetes cluster, switch the `kubeconfig` file and context
+
+```bash
+# save the old config in case you have it
+export KUBECONFIG_SAVED=$KUBECONFIG
+
+# use the one created by the e2e tests
+export KUBECONFIG=/tmp/kind-config-postgres-operator-e2e-tests
+
+# this kubeconfig defines a single context
+kubectl config use-context kind-postgres-operator-e2e-tests
+```
+or use the following script to exec into the K8s setup and then use `kubectl`
 
 ```bash
 ./exec_into_env.sh
 
-# use kube ctl
+# use kubectl
 kubectl get pods
 
 # watch relevant objects
@@ -71,6 +83,14 @@ kubectl get pods
 ./scripts/get_logs.sh
 ```
+
+If you want to inspect the state of the `kind` cluster manually with a single command, add a `context` flag
+
+```bash
+kubectl get pods --context kind-kind
+```
+
+or set the context for a few commands at once with `kubectl config use-context kind-kind`
 
 ## Cleaning up Kind
 
 To cleanup kind and start fresh
@@ -79,6 +99,12 @@ To cleanup kind and start fresh
 e2e/run.sh cleanup
 ```
+
+That also helps in case you see the
+```
+ERROR: no nodes found for cluster "postgres-operator-e2e-tests"
+```
+that happens when the `kind` cluster was deleted manually but its configuration file was not.
 
 ## Covered use cases
 
 The current tests are all bundled in [`test_e2e.py`](tests/test_e2e.py):
@@ -1,17 +1,20 @@
 #!/bin/bash
 
 watch -c "
-kubectl get postgresql
+kubectl get postgresql --all-namespaces
 echo
 echo -n 'Rolling upgrade pending: '
 kubectl get statefulset -o jsonpath='{.items..metadata.annotations.zalando-postgres-operator-rolling-update-required}'
 echo
 echo
-kubectl get pods -o wide
+echo 'Pods'
+kubectl get pods -l application=spilo -l name=postgres-operator -l application=db-connection-pooler -o wide --all-namespaces
 echo
-kubectl get statefulsets
+echo 'Statefulsets'
+kubectl get statefulsets --all-namespaces
 echo
-kubectl get deployments
+echo 'Deployments'
+kubectl get deployments --all-namespaces -l application=db-connection-pooler -l name=postgres-operator
 echo
 echo
 echo 'Step from operator deployment'
@@ -11,9 +11,11 @@ from datetime import datetime
 from kubernetes import client, config
 from kubernetes.client.rest import ApiException
 
+
 def to_selector(labels):
     return ",".join(["=".join(l) for l in labels.items()])
 
+
 class K8sApi:
 
     def __init__(self):
@@ -184,7 +186,7 @@ class K8s:
 
     def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):
         pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
-        return len(list(filter(lambda x: x.status.phase=='Running', pods)))
+        return len(list(filter(lambda x: x.status.phase == 'Running', pods)))
 
     def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
         pod_phase = 'Failing over'
@@ -210,9 +212,9 @@ class K8s:
     def wait_for_logical_backup_job_creation(self):
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
-    def delete_operator_pod(self, step="Delete operator deplyment"):
-        operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
-        self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
+    def delete_operator_pod(self, step="Delete operator pod"):
+        # patching the pod template in the deployment restarts the operator pod
+        self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, datetime.fromtimestamp(time.time()))}}}}})
         self.wait_for_operator_pod_start()
 
     def update_config(self, config_map_patch, step="Updating operator deployment"):
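The comment added above names the trick: the operator pod is restarted indirectly by bumping a pod-template annotation, which makes the Deployment controller roll the pod. The same idea in client-go terms, as a sketch (the kubeconfig path and resource names are the ones this e2e setup uses):

```go
package main

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/client-go/kubernetes"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kind-config-postgres-operator-e2e-tests")
	if err != nil {
		panic(err)
	}
	clientset := kubernetes.NewForConfigOrDie(cfg)

	// bumping a pod-template annotation forces a rolling restart
	patch := []byte(fmt.Sprintf(
		`{"spec":{"template":{"metadata":{"annotations":{"step":"restart-%d"}}}}}`,
		time.Now().Unix()))
	if _, err := clientset.AppsV1().Deployments("default").Patch(
		context.TODO(), "postgres-operator", types.StrategicMergePatchType,
		patch, metav1.PatchOptions{}); err != nil {
		panic(err)
	}
}
```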
@@ -241,7 +243,7 @@ class K8s:
 
     def get_operator_state(self):
         pod = self.get_operator_pod()
-        if pod == None:
+        if pod is None:
             return None
         pod = pod.metadata.name
 
@@ -251,7 +253,6 @@ class K8s:
 
         return json.loads(r.stdout.decode())
 
-
     def get_patroni_running_members(self, pod="acid-minimal-cluster-0"):
         result = self.get_patroni_state(pod)
         return list(filter(lambda x: "State" in x and x["State"] == "running", result))
@@ -260,7 +261,7 @@ class K8s:
         try:
             deployment = self.api.apps_v1.read_namespaced_deployment(name, namespace)
             return deployment.spec.replicas
-        except ApiException as e:
+        except ApiException:
             return None
 
     def get_statefulset_image(self, label_selector="application=spilo,cluster-name=acid-minimal-cluster", namespace='default'):
@@ -463,7 +464,6 @@ class K8sBase:
         self.wait_for_logical_backup_job(expected_num_of_jobs=1)
 
     def delete_operator_pod(self, step="Delete operator deplyment"):
-        operator_pod = self.api.core_v1.list_namespaced_pod('default', label_selector="name=postgres-operator").items[0].metadata.name
         self.api.apps_v1.patch_namespaced_deployment("postgres-operator","default", {"spec":{"template":{"metadata":{"annotations":{"step":"{}-{}".format(step, time.time())}}}}})
         self.wait_for_operator_pod_start()
 
@@ -2,15 +2,14 @@ import json
 import unittest
 import time
 import timeout_decorator
-import subprocess
-import warnings
 import os
 import yaml
 
 from datetime import datetime
-from kubernetes import client, config
+from kubernetes import client
 
 from tests.k8s_api import K8s
+from kubernetes.client.rest import ApiException
 
 SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
 SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-cdp-12:1.6-p114"
@@ -89,17 +88,17 @@ class EndToEndTestCase(unittest.TestCase):
         # remove existing local storage class and create hostpath class
         try:
             k8s.api.storage_v1_api.delete_storage_class("standard")
-        except:
-            print("Storage class has already been remove")
+        except ApiException as e:
+            print("Failed to delete the 'standard' storage class: {0}".format(e))
 
         # operator deploys pod service account there on start up
         # needed for test_multi_namespace_support()
-        cls.namespace = "test"
+        cls.test_namespace = "test"
         try:
-            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.namespace))
+            v1_namespace = client.V1Namespace(metadata=client.V1ObjectMeta(name=cls.test_namespace))
             k8s.api.core_v1.create_namespace(v1_namespace)
-        except:
-            print("Namespace already present")
+        except ApiException as e:
+            print("Failed to create the '{0}' namespace: {1}".format(cls.test_namespace, e))
 
         # submit the most recent operator image built on the Docker host
         with open("manifests/postgres-operator.yaml", 'r+') as f:
@@ -135,10 +134,8 @@ class EndToEndTestCase(unittest.TestCase):
 
         # make sure we start a new operator on every new run,
         # this tackles the problem when kind is reused
-        # and the Docker image is infact changed (dirty one)
-
-        # patch resync period, this can catch some problems with hanging e2e tests
-        # k8s.update_config({"data": {"resync_period":"30s"}},step="TestSuite setup")
+        # and the Docker image is in fact changed (dirty one)
         k8s.update_config({}, step="TestSuite Startup")
 
         actual_operator_image = k8s.api.core_v1.list_namespaced_pod(
@@ -170,9 +167,6 @@ class EndToEndTestCase(unittest.TestCase):
             'connection-pooler': 'acid-minimal-cluster-pooler',
         })
 
-        pod_selector = to_selector(pod_labels)
-        service_selector = to_selector(service_labels)
-
         # enable connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@@ -347,7 +341,7 @@ class EndToEndTestCase(unittest.TestCase):
             },
         }
         k8s.update_config(patch_infrastructure_roles)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         try:
             # check that new roles are represented in the config by requesting the
@@ -604,19 +598,25 @@ class EndToEndTestCase(unittest.TestCase):
 
         with open("manifests/complete-postgres-manifest.yaml", 'r+') as f:
             pg_manifest = yaml.safe_load(f)
-            pg_manifest["metadata"]["namespace"] = self.namespace
+            pg_manifest["metadata"]["namespace"] = self.test_namespace
             yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
 
         try:
             k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
-            k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-            self.assert_master_is_unique(self.namespace, "acid-test-cluster")
+            k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
+            self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
         finally:
-            k8s.api.core_v1.delete_namespace(self.namespace)
+            # delete the new cluster so that k8s_api.get_operator_state works correctly in subsequent tests
+            # ideally we should delete the 'test' namespace here, but the pods inside it
+            # get stuck in the Terminating state, making the test time out
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
+            time.sleep(5)
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_node_readiness_label(self):
@@ -825,14 +825,14 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_delete_annotations)
-        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
         try:
             # this delete attempt should be omitted because of missing annotations
             k8s.api.custom_objects_api.delete_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
             time.sleep(5)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # check that pods and services are still there
             k8s.wait_for_running_pods(cluster_label, 2)
@@ -843,7 +843,7 @@ class EndToEndTestCase(unittest.TestCase):
 
             # wait a little before proceeding
             time.sleep(10)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # add annotations to manifest
             delete_date = datetime.today().strftime('%Y-%m-%d')
@@ -857,7 +857,7 @@ class EndToEndTestCase(unittest.TestCase):
             }
             k8s.api.custom_objects_api.patch_namespaced_custom_object(
                 "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
-            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0":"idle"}, "Operator does not get in sync")
+            self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
 
             # wait a little before proceeding
             time.sleep(20)
@@ -884,7 +884,7 @@ class EndToEndTestCase(unittest.TestCase):
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
 
-        #reset configmap
+        # reset configmap
         patch_delete_annotations = {
             "data": {
                 "delete_annotation_date_key": "",
@@ -107,7 +107,7 @@ data:
   # spilo_runasgroup: 103
   # spilo_fsgroup: 103
   spilo_privileged: "false"
-  # storage_resize_mode: "off"
+  storage_resize_mode: "pvc"
   super_username: postgres
   # team_admin_role: "admin"
   # team_api_role_configuration: "log_statement:all"
@@ -106,6 +106,8 @@ rules:
   - delete
   - get
   - list
+  - patch
+  - update
 # to read existing PVs. Creation should be done via dynamic provisioning
 - apiGroups:
   - ""
@@ -22,22 +22,22 @@ spec:
    - name: Image
      type: string
      description: Spilo image to be used for Pods
-      JSONPath: .configuration.docker_image
+      jsonPath: .configuration.docker_image
    - name: Cluster-Label
      type: string
      description: Label for K8s resources created by operator
-      JSONPath: .configuration.kubernetes.cluster_name_label
+      jsonPath: .configuration.kubernetes.cluster_name_label
    - name: Service-Account
      type: string
      description: Name of service account to be used
-      JSONPath: .configuration.kubernetes.pod_service_account_name
+      jsonPath: .configuration.kubernetes.pod_service_account_name
    - name: Min-Instances
      type: integer
      description: Minimum number of instances per Postgres cluster
-      JSONPath: .configuration.min_instances
+      jsonPath: .configuration.min_instances
    - name: Age
      type: date
-      JSONPath: .metadata.creationTimestamp
+      jsonPath: .metadata.creationTimestamp
  schema:
    openAPIV3Schema:
      type: object
@@ -45,15 +45,15 @@ spec:
            - kind
            - apiVersion
            - configuration
          properties:
            kind:
              type: string
              enum:
                - OperatorConfiguration
            apiVersion:
              type: string
              enum:
                - acid.zalan.do/v1
            configuration:
              type: object
              properties:
@@ -72,7 +72,7 @@ configuration:
    # spilo_runasgroup: 103
    # spilo_fsgroup: 103
    spilo_privileged: false
-    storage_resize_mode: ebs
+    storage_resize_mode: pvc
    # toleration: {}
    # watched_namespace: ""
  postgres_pod_resources:
@@ -22,34 +22,34 @@ spec:
    - name: Team
      type: string
      description: Team responsible for Postgres CLuster
-      JSONPath: .spec.teamId
+      jsonPath: .spec.teamId
    - name: Version
      type: string
      description: PostgreSQL version
-      JSONPath: .spec.postgresql.version
+      jsonPath: .spec.postgresql.version
    - name: Pods
      type: integer
      description: Number of Pods per Postgres cluster
-      JSONPath: .spec.numberOfInstances
+      jsonPath: .spec.numberOfInstances
    - name: Volume
      type: string
      description: Size of the bound volume
-      JSONPath: .spec.volume.size
+      jsonPath: .spec.volume.size
    - name: CPU-Request
      type: string
      description: Requested CPU for Postgres containers
-      JSONPath: .spec.resources.requests.cpu
+      jsonPath: .spec.resources.requests.cpu
    - name: Memory-Request
      type: string
      description: Requested memory for Postgres containers
-      JSONPath: .spec.resources.requests.memory
+      jsonPath: .spec.resources.requests.memory
    - name: Age
      type: date
-      JSONPath: .metadata.creationTimestamp
+      jsonPath: .metadata.creationTimestamp
    - name: Status
      type: string
      description: Current sync status of postgresql resource
-      JSONPath: .status.PostgresClusterStatus
+      jsonPath: .status.PostgresClusterStatus
  schema:
    openAPIV3Schema:
      type: object
@@ -140,7 +140,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
            Secrets:   make(map[types.UID]*v1.Secret),
            Services:  make(map[PostgresRole]*v1.Service),
            Endpoints: make(map[PostgresRole]*v1.Endpoints)},
-        userSyncStrategy: users.DefaultUserSyncStrategy{password_encryption},
+        userSyncStrategy: users.DefaultUserSyncStrategy{PasswordEncryption: password_encryption},
        deleteOptions:    metav1.DeleteOptions{PropagationPolicy: &deletePropagationPolicy},
        podEventsQueue:   podEventsQueue,
        KubeClient:       kubeClient,
@@ -626,14 +626,14 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
        }
    }()
 
-    if oldSpec.Spec.PostgresqlParam.PgVersion >= newSpec.Spec.PostgresqlParam.PgVersion {
+    if oldSpec.Spec.PostgresqlParam.PgVersion > newSpec.Spec.PostgresqlParam.PgVersion {
        c.logger.Warningf("postgresql version change(%q -> %q) has no effect",
            oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
        c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "PostgreSQL", "postgresql version change(%q -> %q) has no effect",
            oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
        // we need that hack to generate statefulset with the old version
        newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
-    } else {
+    } else if oldSpec.Spec.PostgresqlParam.PgVersion < newSpec.Spec.PostgresqlParam.PgVersion {
        c.logger.Infof("postgresql version increased (%q -> %q), major version upgrade can be done manually after StatefulSet Sync",
            oldSpec.Spec.PostgresqlParam.PgVersion, newSpec.Spec.PostgresqlParam.PgVersion)
    }
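One caveat worth noting: `PgVersion` is a plain string in the operator's API types, so both comparisons above are lexicographic. That is fine while the major versions being compared have the same number of digits, but not across widths; a quick illustration (a sketch, not operator code):

```go
package main

import "fmt"

func main() {
	fmt.Println("12" > "11")  // true: string comparison works for same-width versions
	fmt.Println("9.6" > "10") // also true: '9' sorts after '1', although 9.6 < 10 numerically
}
```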
@@ -671,13 +671,21 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
 
    // Volume
    if oldSpec.Spec.Size != newSpec.Spec.Size {
-        c.logger.Debugf("syncing persistent volumes")
        c.logVolumeChanges(oldSpec.Spec.Volume, newSpec.Spec.Volume)
-        if err := c.syncVolumes(); err != nil {
-            c.logger.Errorf("could not sync persistent volumes: %v", err)
-            updateFailed = true
+        c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
+        if c.OpConfig.StorageResizeMode == "pvc" {
+            if err := c.syncVolumeClaims(); err != nil {
+                c.logger.Errorf("could not sync persistent volume claims: %v", err)
+                updateFailed = true
+            }
+        } else if c.OpConfig.StorageResizeMode == "ebs" {
+            if err := c.syncVolumes(); err != nil {
+                c.logger.Errorf("could not sync persistent volumes: %v", err)
+                updateFailed = true
+            }
+        } else {
+            c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
        }
    }
 
    // Statefulset
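The new if/else-if chain dispatches on `storage_resize_mode`, with the final arm covering the documented `off` value. The same logic reads naturally as a switch; a standalone sketch with `syncPVC`/`syncEBS` as hypothetical stand-ins for `c.syncVolumeClaims`/`c.syncVolumes`:

```go
package main

import "fmt"

func syncPVC() error { return nil } // stand-in for c.syncVolumeClaims
func syncEBS() error { return nil } // stand-in for c.syncVolumes

func syncVolumesFor(mode string) (updateFailed bool) {
	switch mode {
	case "pvc":
		if err := syncPVC(); err != nil {
			fmt.Printf("could not sync persistent volume claims: %v\n", err)
			updateFailed = true
		}
	case "ebs":
		if err := syncEBS(); err != nil {
			fmt.Printf("could not sync persistent volumes: %v\n", err)
			updateFailed = true
		}
	default: // "off" or anything unrecognized
		fmt.Println("storage resize is disabled, skipping volume sync")
	}
	return updateFailed
}

func main() {
	fmt.Println(syncVolumesFor("pvc"), syncVolumesFor("off"))
}
```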
@@ -57,8 +57,8 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
        return err
    }
 
+    c.logger.Debugf("syncing volumes using %q storage resize mode", c.OpConfig.StorageResizeMode)
    if c.OpConfig.StorageResizeMode == "pvc" {
-        c.logger.Debugf("syncing persistent volume claims")
        if err = c.syncVolumeClaims(); err != nil {
            err = fmt.Errorf("could not sync persistent volume claims: %v", err)
            return err
@@ -70,7 +70,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
        // TODO: handle the case of the cluster that is downsized and enlarged again
        // (there will be a volume from the old pod for which we can't act before the
        // the statefulset modification is concluded)
-        c.logger.Debugf("syncing persistent volumes")
        if err = c.syncVolumes(); err != nil {
            err = fmt.Errorf("could not sync persistent volumes: %v", err)
            return err
@@ -62,7 +62,7 @@ func (c *Cluster) resizeVolumeClaims(newVolume acidv1.Volume) error {
    if err != nil {
        return fmt.Errorf("could not parse volume size: %v", err)
    }
-    _, newSize, err := c.listVolumesWithManifestSize(newVolume)
+    newSize := quantityToGigabyte(newQuantity)
    for _, pvc := range pvcs {
        volumeSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
        if volumeSize >= newSize {
@@ -0,0 +1,171 @@
+package cluster
+
+import (
+    "testing"
+
+    "context"
+
+    v1 "k8s.io/api/core/v1"
+    "k8s.io/apimachinery/pkg/api/resource"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/labels"
+
+    "github.com/stretchr/testify/assert"
+    acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
+    "github.com/zalando/postgres-operator/pkg/util/config"
+    "github.com/zalando/postgres-operator/pkg/util/constants"
+    "github.com/zalando/postgres-operator/pkg/util/k8sutil"
+    "k8s.io/client-go/kubernetes/fake"
+)
+
+func NewFakeKubernetesClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+    clientSet := fake.NewSimpleClientset()
+
+    return k8sutil.KubernetesClient{
+        PersistentVolumeClaimsGetter: clientSet.CoreV1(),
+    }, clientSet
+}
+
+func TestResizeVolumeClaim(t *testing.T) {
+    testName := "test resizing of persistent volume claims"
+    client, _ := NewFakeKubernetesClient()
+    clusterName := "acid-test-cluster"
+    namespace := "default"
+    newVolumeSize := "2Gi"
+
+    // new cluster with pvc storage resize mode and configured labels
+    var cluster = New(
+        Config{
+            OpConfig: config.Config{
+                Resources: config.Resources{
+                    ClusterLabels:    map[string]string{"application": "spilo"},
+                    ClusterNameLabel: "cluster-name",
+                },
+                StorageResizeMode: "pvc",
+            },
+        }, client, acidv1.Postgresql{}, logger, eventRecorder)
+
+    // set metadata, so that labels will get correct values
+    cluster.Name = clusterName
+    cluster.Namespace = namespace
+    filterLabels := cluster.labelsSet(false)
+
+    // define and create PVCs for 1Gi volumes
+    storage1Gi, err := resource.ParseQuantity("1Gi")
+    assert.NoError(t, err)
+
+    pvcList := &v1.PersistentVolumeClaimList{
+        Items: []v1.PersistentVolumeClaim{
+            {
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      constants.DataVolumeName + "-" + clusterName + "-0",
+                    Namespace: namespace,
+                    Labels:    filterLabels,
+                },
+                Spec: v1.PersistentVolumeClaimSpec{
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceStorage: storage1Gi,
+                        },
+                    },
+                },
+            },
+            {
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      constants.DataVolumeName + "-" + clusterName + "-1",
+                    Namespace: namespace,
+                    Labels:    filterLabels,
+                },
+                Spec: v1.PersistentVolumeClaimSpec{
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceStorage: storage1Gi,
+                        },
+                    },
+                },
+            },
+            {
+                ObjectMeta: metav1.ObjectMeta{
+                    Name:      constants.DataVolumeName + "-" + clusterName + "-2-0",
+                    Namespace: namespace,
+                    Labels:    labels.Set{},
+                },
+                Spec: v1.PersistentVolumeClaimSpec{
+                    Resources: v1.ResourceRequirements{
+                        Requests: v1.ResourceList{
+                            v1.ResourceStorage: storage1Gi,
+                        },
+                    },
+                },
+            },
+        },
+    }
+
+    for _, pvc := range pvcList.Items {
+        cluster.KubeClient.PersistentVolumeClaims(namespace).Create(context.TODO(), &pvc, metav1.CreateOptions{})
+    }
+
+    // test resizing
+    cluster.resizeVolumeClaims(acidv1.Volume{Size: newVolumeSize})
+
+    pvcs, err := cluster.listPersistentVolumeClaims()
+    assert.NoError(t, err)
+
+    // check if listPersistentVolumeClaims returns only the PVCs matching the filter
+    if len(pvcs) != len(pvcList.Items)-1 {
+        t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
+    }
+
+    // check if PVCs were correctly resized
+    for _, pvc := range pvcs {
+        newStorageSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
+        expectedQuantity, err := resource.ParseQuantity(newVolumeSize)
+        assert.NoError(t, err)
+        expectedSize := quantityToGigabyte(expectedQuantity)
+        if newStorageSize != expectedSize {
+            t.Errorf("%s: resizing failed, got %v, expected %v", testName, newStorageSize, expectedSize)
+        }
+    }
+
+    // check if other PVC was not resized
+    pvc2, err := cluster.KubeClient.PersistentVolumeClaims(namespace).Get(context.TODO(), constants.DataVolumeName+"-"+clusterName+"-2-0", metav1.GetOptions{})
+    assert.NoError(t, err)
+    unchangedSize := quantityToGigabyte(pvc2.Spec.Resources.Requests[v1.ResourceStorage])
+    expectedSize := quantityToGigabyte(storage1Gi)
+    if unchangedSize != expectedSize {
+        t.Errorf("%s: volume size changed, got %v, expected %v", testName, unchangedSize, expectedSize)
+    }
+}
+
+func TestQuantityToGigabyte(t *testing.T) {
+    tests := []struct {
+        name        string
+        quantityStr string
+        expected    int64
+    }{
+        {
+            "test with 1Gi",
+            "1Gi",
+            1,
+        },
+        {
+            "test with float",
+            "1.5Gi",
+            int64(1),
+        },
+        {
+            "test with 1000Mi",
+            "1000Mi",
+            int64(0),
+        },
+    }
+
+    for _, tt := range tests {
+        quantity, err := resource.ParseQuantity(tt.quantityStr)
+        assert.NoError(t, err)
+        gigabyte := quantityToGigabyte(quantity)
+        if gigabyte != tt.expected {
+            t.Errorf("%s: got %v, expected %v", tt.name, gigabyte, tt.expected)
+        }
+    }
+}
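The three `TestQuantityToGigabyte` cases (1Gi → 1, 1.5Gi → 1, 1000Mi → 0) pin down the helper's behavior: truncating integer division of the byte value by 2^30. A sketch under that assumption (the committed implementation itself is not shown in this diff):

```go
package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

// quantityToGigabyte as implied by the test table: whole binary
// gigabytes, truncated, so 1000Mi (≈0.98Gi) becomes 0.
func quantityToGigabyte(q resource.Quantity) int64 {
	return q.Value() / (1 << 30)
}

func main() {
	for _, s := range []string{"1Gi", "1.5Gi", "1000Mi"} {
		fmt.Println(s, "->", quantityToGigabyte(resource.MustParse(s)))
	}
}
```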
@@ -341,9 +341,7 @@ func (c *Controller) getInfrastructureRole(
            util.Coalesce(string(secretData[infraRole.RoleKey]), infraRole.DefaultRoleValue))
    }
 
-    if roleDescr.Valid() {
-        roles = append(roles, *roleDescr)
-    } else {
+    if !roleDescr.Valid() {
        msg := "infrastructure role %q is not complete and ignored"
        c.logger.Warningf(msg, roleDescr)
 