Merge branch 'master' into replica-pooler

Rafia Sabih 2020-10-05 11:47:47 +02:00
commit 86e6a51fa9
38 changed files with 776 additions and 403 deletions


@@ -0,0 +1,19 @@
+---
+name: Postgres Operator issue template
+about: How are you using the operator?
+title: ''
+labels: ''
+assignees: ''
+---
+
+Please answer some short questions which should help us to understand your problem / question better:
+
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.5.0
+- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
+- **Are you running Postgres Operator in production?** [yes | no]
+- **Type of issue?** [Bug report, question, feature request, etc.]
+
+Some general remarks when posting a bug report:
+
+- Please check the operator, pod (Patroni) and postgresql logs first. When copy-pasting many log lines please do it in a separate GitHub gist together with your Postgres CRD and configuration manifest.
+- If you feel this issue might be more related to the [Spilo](https://github.com/zalando/spilo/issues) docker image or [Patroni](https://github.com/zalando/patroni/issues), consider opening issues in the respective repos.


@@ -97,4 +97,4 @@ test:
 	GO111MODULE=on go test ./...
 
 e2e: docker # build operator image to be tested
-	cd e2e; make tools e2etest clean
+	cd e2e; make e2etest


@@ -76,12 +76,6 @@ There is a browser-friendly version of this documentation at
 * [Postgres manifest reference](docs/reference/cluster_manifest.md)
 * [Command-line options and environment variables](docs/reference/command_line_and_environment.md)
 
-## Google Summer of Code
-
-The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/organizations/5429926902104064/)!
-Check [our ideas](docs/gsoc-2019/ideas.md#google-summer-of-code-2019)
-and start discussions in [the issue tracker](https://github.com/zalando/postgres-operator/issues).
-
 ## Community
 
 There are two places to get in touch with the community:


@@ -200,6 +200,10 @@ spec:
             type: string
           secret_name_template:
             type: string
+          spilo_runasuser:
+            type: integer
+          spilo_runasgroup:
+            type: integer
           spilo_fsgroup:
            type: integer
           spilo_privileged:
@@ -259,6 +263,11 @@ spec:
            type: boolean
           enable_replica_load_balancer:
            type: boolean
+          external_traffic_policy:
+            type: string
+            enum:
+            - "Cluster"
+            - "Local"
           master_dns_name_format:
            type: string
           replica_dns_name_format:


@@ -376,6 +376,10 @@ spec:
            items:
              type: object
              additionalProperties: true
+          spiloRunAsUser:
+            type: integer
+          spiloRunAsGroup:
+            type: integer
           spiloFSGroup:
             type: integer
           standby:


@@ -9,6 +9,9 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service }}
     app.kubernetes.io/instance: {{ .Release.Name }}
 data:
+{{- if .Values.podPriorityClassName }}
+  pod_priority_class_name: {{ .Values.podPriorityClassName }}
+{{- end }}
   pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
{{ toYaml .Values.configGeneral | indent 2 }}
{{ toYaml .Values.configUsers | indent 2 }}


@@ -13,6 +13,9 @@ configuration:
   users:
{{ toYaml .Values.configUsers | indent 4 }}
   kubernetes:
+{{- if .Values.podPriorityClassName }}
+    pod_priority_class_name: {{ .Values.podPriorityClassName }}
+{{- end }}
     pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
{{ toYaml .Values.configKubernetes | indent 4 }}


@@ -0,0 +1,15 @@
+{{- if .Values.podPriorityClassName }}
+apiVersion: scheduling.k8s.io/v1
+description: 'Use only for databases controlled by Postgres operator'
+kind: PriorityClass
+metadata:
+  labels:
+    app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
+    helm.sh/chart: {{ template "postgres-operator.chart" . }}
+    app.kubernetes.io/managed-by: {{ .Release.Service }}
+    app.kubernetes.io/instance: {{ .Release.Name }}
+  name: {{ .Values.podPriorityClassName }}
+preemptionPolicy: PreemptLowerPriority
+globalDefault: false
+value: 1000000
+{{- end }}
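For context: this template renders only when `podPriorityClassName` is set in the chart values, and the two configuration templates earlier in this commit then propagate the same value as `pod_priority_class_name` to the operator. A minimal sketch of enabling it (the class name below is illustrative, not a chart default):

# values.yaml sketch: renders the PriorityClass above and wires it
# into the generated operator configuration (illustrative name)
podPriorityClassName: "postgres-database-pods"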


@@ -127,6 +127,9 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: 103
@@ -180,6 +183,8 @@ configLoadBalancer:
   enable_master_load_balancer: false
   # toggles service type load balancer pointing to the replica pod of the cluster
   enable_replica_load_balancer: false
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
   # defines the DNS name string template for the master load balancer cluster
   master_dns_name_format: "{cluster}.{team}.{hostedzone}"
   # defines the DNS name string template for the replica load balancer cluster
@@ -315,8 +320,12 @@ podServiceAccount:
   # If not set a name is generated using the fullname template and "-pod" suffix
   name: "postgres-pod"
 
+# priority class for operator pod
 priorityClassName: ""
 
+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
   limits:
     cpu: 500m


@@ -118,6 +118,9 @@ configKubernetes:
   pod_terminate_grace_period: 5m
   # template for database user secrets generated by the operator
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # set user and group for the spilo container (required to run Spilo as non-root process)
+  # spilo_runasuser: "101"
+  # spilo_runasgroup: "103"
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: "103"
@@ -169,6 +172,8 @@ configLoadBalancer:
   enable_master_load_balancer: "false"
   # toggles service type load balancer pointing to the replica pod of the cluster
   enable_replica_load_balancer: "false"
+  # define external traffic policy for the load balancer
+  external_traffic_policy: "Cluster"
   # defines the DNS name string template for the master load balancer cluster
   master_dns_name_format: '{cluster}.{team}.{hostedzone}'
   # defines the DNS name string template for the replica load balancer cluster
@@ -307,8 +312,12 @@ podServiceAccount:
   # If not set a name is generated using the fullname template and "-pod" suffix
   name: "postgres-pod"
 
+# priority class for operator pod
 priorityClassName: ""
 
+# priority class for database pods
+podPriorityClassName: ""
+
 resources:
   limits:
     cpu: 500m


@@ -2,6 +2,10 @@ version: "2017-09-20"
 pipeline:
 - id: build-postgres-operator
   type: script
+  vm: large
+  cache:
+    paths:
+    - /go/pkg/mod
   commands:
   - desc: 'Update'
     cmd: |


@@ -237,9 +237,11 @@ kubectl logs acid-minimal-cluster-0
 
 ## End-to-end tests
 
-The operator provides reference end-to-end tests (e2e) (as Docker image) to
-ensure various infrastructure parts work smoothly together. Each e2e execution
-tests a Postgres Operator image built from the current git branch. The test
+The operator provides reference end-to-end (e2e) tests to
+ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`.
+The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by the test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build.
+Each e2e execution tests a Postgres Operator image built from the current git branch. The test
 runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/),
 utilizes provided manifest examples, and runs e2e tests contained in the `tests`
 folder. The K8s API client in the container connects to the `kind` cluster via


@@ -65,6 +65,16 @@ These parameters are grouped directly under the `spec` key in the manifest.
   custom Docker image that overrides the **docker_image** operator parameter.
   It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.
 
+* **spiloRunAsUser**
+  sets the user ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spiloRunAsGroup**
+  sets the group ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
 * **spiloFSGroup**
   the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
   writable by the group ID specified. This will override the **spilo_fsgroup**
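To illustrate these options together, a cluster manifest might include the sketch below; the IDs are assumptions mirroring the commented chart-value examples and must match the UID/GID baked into the Spilo image in use:

apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  volume:
    size: 1Gi
  # run the Spilo container as a non-root process (illustrative IDs)
  spiloRunAsUser: 101
  spiloRunAsGroup: 103
  # group ID granted write access to the data volume
  spiloFSGroup: 103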


@@ -317,6 +317,16 @@ configuration they are grouped under the `kubernetes` key.
   that should be assigned to the Postgres pods. The priority class itself must
   be defined in advance. Default is empty (use the default priority class).
 
+* **spilo_runasuser**
+  sets the user ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
+* **spilo_runasgroup**
+  sets the group ID which should be used in the container to run the process.
+  This must be set to run the container without root. By default the container
+  runs with root. This option only works for Spilo versions >= 1.6-p3.
+
 * **spilo_fsgroup**
   the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
   writable by the group ID specified. This is required to run Spilo as a
@@ -424,6 +434,12 @@ CRD-based configuration.
 Those options affect the behavior of load balancers created by the operator.
 In the CRD-based configuration they are grouped under the `load_balancer` key.
 
+* **custom_service_annotations**
+  This key/value map provides a list of annotations that get attached to each
+  service of a cluster created by the operator. If the annotation key is also
+  provided by the cluster definition, the manifest value is used.
+  Optional.
+
 * **db_hosted_zone**
   DNS zone for the cluster DNS name when the load balancer is configured for
   the cluster. Only used when combined with
@@ -440,11 +456,8 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
   cluster. Can be overridden by individual cluster settings. The default is
   `false`.
 
-* **custom_service_annotations**
-  This key/value map provides a list of annotations that get attached to each
-  service of a cluster created by the operator. If the annotation key is also
-  provided by the cluster definition, the manifest value is used.
-  Optional.
+* **external_traffic_policy** defines external traffic policy for load
+  balancers. Allowed values are `Cluster` (default) and `Local`.
 
 * **master_dns_name_format** defines the DNS name string template for the
   master load balancer cluster. The default is
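Taken together, a CRD-based operator configuration using the options documented in this file could look like the sketch below (all values are illustrative; the AWS annotation is just one example of a custom service annotation):

apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  kubernetes:
    # run Spilo as a non-root process (requires Spilo >= 1.6-p3)
    spilo_runasuser: 101
    spilo_runasgroup: 103
    spilo_fsgroup: 103
  load_balancer:
    enable_master_load_balancer: false
    # "Cluster" (default) or "Local"
    external_traffic_policy: "Cluster"
    custom_service_annotations:
      # manifest-level annotation keys take precedence over this map
      service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout: "3600"
    master_dns_name_format: "{cluster}.{team}.{hostedzone}"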


@@ -1,11 +1,12 @@
-# An image to perform the actual test. Do not forget to copy all necessary test
-# files here.
-FROM ubuntu:18.04
+# An image to run e2e tests.
+# The image does not include the tests; all necessary files are bind-mounted when a container starts.
+FROM ubuntu:20.04
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"
 
-COPY manifests ./manifests
-COPY exec.sh ./exec.sh
-COPY requirements.txt tests ./
+ENV TERM xterm-256color
+
+COPY requirements.txt ./
+COPY scm-source.json ./
 
 RUN apt-get update \
     && apt-get install --no-install-recommends -y \
@@ -14,13 +15,10 @@ RUN apt-get update \
        python3-pip \
        curl \
     && pip3 install --no-cache-dir -r requirements.txt \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl \
+    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \
     && chmod +x ./kubectl \
     && mv ./kubectl /usr/local/bin/kubectl \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
 
-ARG VERSION=dev
-RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" ./__init__.py
-CMD ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"]
+ENTRYPOINT ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"]


@@ -1,6 +1,6 @@
 .PHONY: clean copy docker push tools test
 
-BINARY ?= postgres-operator-e2e-tests
+BINARY ?= postgres-operator-e2e-tests-runner
 BUILD_FLAGS ?= -v
 CGO_ENABLED ?= 0
 ifeq ($(RACE),1)
@@ -34,15 +34,20 @@ copy: clean
 	mkdir manifests
 	cp ../manifests -r .
 
-docker: copy
-	docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
+docker: scm-source.json
+	docker build -t "$(IMAGE):$(TAG)" .
+
+scm-source.json: ../.git
+	echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json
 
 push: docker
 	docker push "$(IMAGE):$(TAG)"
 
-tools: docker
+tools:
 	# install pinned version of 'kind'
-	GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1
+	# go get must run outside of a dir with a (module-based) Go project !
+	# otherwise go get updates project's dependencies and/or behaves differently
+	cd "/tmp" && GO111MODULE=on go get sigs.k8s.io/kind@v0.9.0
 
-e2etest:
-	./run.sh
+e2etest: tools copy clean
+	./run.sh main


@@ -1,5 +1,5 @@
 kind: Cluster
-apiVersion: kind.sigs.k8s.io/v1alpha3
+apiVersion: kind.x-k8s.io/v1alpha4
 nodes:
 - role: control-plane
 - role: worker


@@ -6,57 +6,67 @@ set -o nounset
 set -o pipefail
 IFS=$'\n\t'
 
+cd $(dirname "$0");
+
 readonly cluster_name="postgres-operator-e2e-tests"
 readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
+readonly spilo_image="registry.opensource.zalan.do/acid/spilo-12:1.6-p5"
+
+echo "Clustername: ${cluster_name}"
+echo "Kubeconfig path: ${kubeconfig_path}"
 
 function pull_images(){
   operator_tag=$(git describe --tags --always --dirty)
   if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]]
   then
     docker pull registry.opensource.zalan.do/acid/postgres-operator:latest
   fi
-  if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:${operator_tag}) ]]
-  then
-    docker pull registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:latest
-  fi
   operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
-  e2e_test_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests" --format "{{.Repository}}:{{.Tag}}" | head -1)
+
+  # this image does not contain the tests; a container mounts them from a local "./tests" dir at start time
+  e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:latest"
+  docker pull ${e2e_test_runner_image}
 }
 
 function start_kind(){
+  echo "Starting kind for e2e tests"
   # avoid interference with previous test runs
   if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]]
   then
    kind delete cluster --name ${cluster_name}
   fi
 
+  export KUBECONFIG="${kubeconfig_path}"
   kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
   kind load docker-image "${operator_image}" --name ${cluster_name}
-  kind load docker-image "${e2e_test_image}" --name ${cluster_name}
-  KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})"
-  export KUBECONFIG
+  docker pull "${spilo_image}"
+  kind load docker-image "${spilo_image}" --name ${cluster_name}
 }
 
 function set_kind_api_server_ip(){
+  echo "Setting up kind API server ip"
   # use the actual kubeconfig to connect to the 'kind' API server
   # but update the IP address of the API server to the one from the Docker 'bridge' network
-  cp "${KUBECONFIG}" /tmp
   readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
-  readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
+  readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
   sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
 }
 
 function run_tests(){
-  docker run --rm --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}"
+  echo "Running tests..."
+
+  # tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile
+  docker run --rm --network=host -e "TERM=xterm-256color" \
+  --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
+  --mount type=bind,source="$(readlink -f manifests)",target=/manifests \
+  --mount type=bind,source="$(readlink -f tests)",target=/tests \
+  --mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
+  -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}"
 }
 
 function clean_up(){
+  echo "Executing cleanup"
   unset KUBECONFIG
   kind delete cluster --name ${cluster_name}
   rm -rf ${kubeconfig_path}
@@ -66,11 +76,11 @@ function main(){
   trap "clean_up" QUIT TERM EXIT
 
-  pull_images
-  start_kind
-  set_kind_api_server_ip
+  time pull_images
+  time start_kind
+  time set_kind_api_server_ip
   run_tests
   exit 0
 }
 
-main "$@"
+"$@"


@@ -34,10 +34,14 @@ class EndToEndTestCase(unittest.TestCase):
         In the case of test failure the cluster will stay to enable manual examination;
         next invocation of "make test" will re-create it.
         '''
+        print("Test Setup being executed")
+
         # set a single K8s wrapper for all tests
         k8s = cls.k8s = K8s()
 
+        # remove existing local storage class and create hostpath class
+        k8s.api.storage_v1_api.delete_storage_class("standard")
 
         # operator deploys pod service account there on start up
         # needed for test_multi_namespace_support()
         cls.namespace = "test"
@@ -54,7 +58,8 @@ class EndToEndTestCase(unittest.TestCase):
                          "configmap.yaml",
                          "postgres-operator.yaml",
                          "infrastructure-roles.yaml",
-                         "infrastructure-roles-new.yaml"]:
+                         "infrastructure-roles-new.yaml",
+                         "e2e-storage-class.yaml"]:
             result = k8s.create_with_kubectl("manifests/" + filename)
             print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
@@ -159,45 +164,97 @@ class EndToEndTestCase(unittest.TestCase):
         k8s = self.k8s
         cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
 
-        # enable load balancer services
-        pg_patch_enable_lbs = {
-            "spec": {
-                "enableMasterLoadBalancer": True,
-                "enableReplicaLoadBalancer": True
-            }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
-        # wait for service recreation
-        time.sleep(60)
-
-        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
-        self.assertEqual(master_svc_type, 'LoadBalancer',
-                         "Expected LoadBalancer service type for master, found {}".format(master_svc_type))
-
-        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
-        self.assertEqual(repl_svc_type, 'LoadBalancer',
-                         "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type))
-
-        # disable load balancer services again
-        pg_patch_disable_lbs = {
-            "spec": {
-                "enableMasterLoadBalancer": False,
-                "enableReplicaLoadBalancer": False
-            }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
-        # wait for service recreation
-        time.sleep(60)
-
-        master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
-        self.assertEqual(master_svc_type, 'ClusterIP',
-                         "Expected ClusterIP service type for master, found {}".format(master_svc_type))
-
-        repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
-        self.assertEqual(repl_svc_type, 'ClusterIP',
-                         "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))
+        try:
+            # enable load balancer services
+            pg_patch_enable_lbs = {
+                "spec": {
+                    "enableMasterLoadBalancer": True,
+                    "enableReplicaLoadBalancer": True
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_lbs)
+            # wait for service recreation
+            time.sleep(60)
+
+            master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
+            self.assertEqual(master_svc_type, 'LoadBalancer',
+                             "Expected LoadBalancer service type for master, found {}".format(master_svc_type))
+
+            repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
+            self.assertEqual(repl_svc_type, 'LoadBalancer',
+                             "Expected LoadBalancer service type for replica, found {}".format(repl_svc_type))
+
+            # disable load balancer services again
+            pg_patch_disable_lbs = {
+                "spec": {
+                    "enableMasterLoadBalancer": False,
+                    "enableReplicaLoadBalancer": False
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_lbs)
+            # wait for service recreation
+            time.sleep(60)
+
+            master_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=master')
+            self.assertEqual(master_svc_type, 'ClusterIP',
+                             "Expected ClusterIP service type for master, found {}".format(master_svc_type))
+
+            repl_svc_type = k8s.get_service_type(cluster_label + ',spilo-role=replica')
+            self.assertEqual(repl_svc_type, 'ClusterIP',
+                             "Expected ClusterIP service type for replica, found {}".format(repl_svc_type))
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
+
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_infrastructure_roles(self):
+        '''
+            Test using external secrets for infrastructure roles
+        '''
+        k8s = self.k8s
+        # update infrastructure roles description
+        secret_name = "postgresql-infrastructure-roles"
+        roles = "secretname: postgresql-infrastructure-roles-new, \
+            userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon"
+        patch_infrastructure_roles = {
+            "data": {
+                "infrastructure_roles_secret_name": secret_name,
+                "infrastructure_roles_secrets": roles,
+            },
+        }
+        k8s.update_config(patch_infrastructure_roles)
+
+        # wait a little before proceeding
+        time.sleep(30)
+
+        try:
+            # check that new roles are represented in the config by requesting the
+            # operator configuration via API
+            operator_pod = k8s.get_operator_pod()
+            get_config_cmd = "wget --quiet -O - localhost:8080/config"
+            result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
+            roles_dict = (json.loads(result.stdout)
+                          .get("controller", {})
+                          .get("InfrastructureRoles"))
+
+            self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
+            role = roles_dict["robot_zmon_acid_monitoring_new"]
+            role.pop("Password", None)
+            self.assertDictEqual(role, {
+                "Name": "robot_zmon_acid_monitoring_new",
+                "Flags": None,
+                "MemberOf": ["robot_zmon"],
+                "Parameters": None,
+                "AdminRole": "",
+                "Origin": 2,
+            })
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_lazy_spilo_upgrade(self):
@@ -226,38 +283,45 @@ class EndToEndTestCase(unittest.TestCase):
         pod0 = 'acid-minimal-cluster-0'
         pod1 = 'acid-minimal-cluster-1'
 
-        # restart the pod to get a container with the new image
-        k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
-        time.sleep(60)
-
-        # lazy update works if the restarted pod and older pods run different Spilo versions
-        new_image = k8s.get_effective_pod_image(pod0)
-        old_image = k8s.get_effective_pod_image(pod1)
-        self.assertNotEqual(new_image, old_image, "Lazy updated failed: pods have the same image {}".format(new_image))
-
-        # sanity check
-        assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
-        self.assertEqual(new_image, conf_image, assert_msg)
-
-        # clean up
-        unpatch_lazy_spilo_upgrade = {
-            "data": {
-                "enable_lazy_spilo_upgrade": "false",
-            }
-        }
-        k8s.update_config(unpatch_lazy_spilo_upgrade)
-
-        # at this point operator will complete the normal rolling upgrade
-        # so we additonally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
-
-        # XXX there is no easy way to wait until the end of Sync()
-        time.sleep(60)
-
-        image0 = k8s.get_effective_pod_image(pod0)
-        image1 = k8s.get_effective_pod_image(pod1)
-
-        assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1)
-        self.assertEqual(image0, image1, assert_msg)
+        try:
+            # restart the pod to get a container with the new image
+            k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
+            time.sleep(60)
+
+            # lazy update works if the restarted pod and older pods run different Spilo versions
+            new_image = k8s.get_effective_pod_image(pod0)
+            old_image = k8s.get_effective_pod_image(pod1)
+            self.assertNotEqual(new_image, old_image,
+                                "Lazy updated failed: pods have the same image {}".format(new_image))
+
+            # sanity check
+            assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
+            self.assertEqual(new_image, conf_image, assert_msg)
+
+            # clean up
+            unpatch_lazy_spilo_upgrade = {
+                "data": {
+                    "enable_lazy_spilo_upgrade": "false",
+                }
+            }
+            k8s.update_config(unpatch_lazy_spilo_upgrade)
+
+            # at this point operator will complete the normal rolling upgrade
+            # so we additionally test if disabling the lazy upgrade - forcing the normal rolling upgrade - works
+
+            # XXX there is no easy way to wait until the end of Sync()
+            time.sleep(60)
+
+            image0 = k8s.get_effective_pod_image(pod0)
+            image1 = k8s.get_effective_pod_image(pod1)
+
+            assert_msg = "Disabling lazy upgrade failed: pods still have different \
+                images {} and {}".format(image0, image1)
+            self.assertEqual(image0, image1, assert_msg)
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_logical_backup_cron_job(self):
@@ -283,45 +347,51 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup)
-        k8s.wait_for_logical_backup_job_creation()
-
-        jobs = k8s.get_logical_backup_job().items
-        self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs)))
-
-        job = jobs[0]
-        self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster",
-                         "Expected job name {}, found {}"
-                         .format("logical-backup-acid-minimal-cluster", job.metadata.name))
-        self.assertEqual(job.spec.schedule, schedule,
-                         "Expected {} schedule, found {}"
-                         .format(schedule, job.spec.schedule))
-
-        # update the cluster-wide image of the logical backup pod
-        image = "test-image-name"
-        patch_logical_backup_image = {
-            "data": {
-                "logical_backup_docker_image": image,
-            }
-        }
-        k8s.update_config(patch_logical_backup_image)
-
-        jobs = k8s.get_logical_backup_job().items
-        actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image
-        self.assertEqual(actual_image, image,
-                         "Expected job image {}, found {}".format(image, actual_image))
-
-        # delete the logical backup cron job
-        pg_patch_disable_backup = {
-            "spec": {
-                "enableLogicalBackup": False,
-            }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)
-        k8s.wait_for_logical_backup_job_deletion()
-        jobs = k8s.get_logical_backup_job().items
-        self.assertEqual(0, len(jobs),
-                         "Expected 0 logical backup jobs, found {}".format(len(jobs)))
+        try:
+            k8s.wait_for_logical_backup_job_creation()
+
+            jobs = k8s.get_logical_backup_job().items
+            self.assertEqual(1, len(jobs), "Expected 1 logical backup job, found {}".format(len(jobs)))
+
+            job = jobs[0]
+            self.assertEqual(job.metadata.name, "logical-backup-acid-minimal-cluster",
+                             "Expected job name {}, found {}"
+                             .format("logical-backup-acid-minimal-cluster", job.metadata.name))
+            self.assertEqual(job.spec.schedule, schedule,
+                             "Expected {} schedule, found {}"
+                             .format(schedule, job.spec.schedule))
+
+            # update the cluster-wide image of the logical backup pod
+            image = "test-image-name"
+            patch_logical_backup_image = {
+                "data": {
+                    "logical_backup_docker_image": image,
+                }
+            }
+            k8s.update_config(patch_logical_backup_image)
+
+            jobs = k8s.get_logical_backup_job().items
+            actual_image = jobs[0].spec.job_template.spec.template.spec.containers[0].image
+            self.assertEqual(actual_image, image,
+                             "Expected job image {}, found {}".format(image, actual_image))
+
+            # delete the logical backup cron job
+            pg_patch_disable_backup = {
+                "spec": {
+                    "enableLogicalBackup": False,
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_disable_backup)
+            k8s.wait_for_logical_backup_job_deletion()
+            jobs = k8s.get_logical_backup_job().items
+            self.assertEqual(0, len(jobs),
+                             "Expected 0 logical backup jobs, found {}".format(len(jobs)))
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_min_resource_limits(self):
@@ -361,20 +431,26 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)
-        k8s.wait_for_pod_failover(failover_targets, labels)
-        k8s.wait_for_pod_start('spilo-role=replica')
-
-        pods = k8s.api.core_v1.list_namespaced_pod(
-            'default', label_selector=labels).items
-        self.assert_master_is_unique()
-        masterPod = pods[0]
-
-        self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
-                         "Expected CPU limit {}, found {}"
-                         .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
-        self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
-                         "Expected memory limit {}, found {}"
-                         .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))
+        try:
+            k8s.wait_for_pod_failover(failover_targets, labels)
+            k8s.wait_for_pod_start('spilo-role=replica')
+
+            pods = k8s.api.core_v1.list_namespaced_pod(
+                'default', label_selector=labels).items
+            self.assert_master_is_unique()
+            masterPod = pods[0]
+
+            self.assertEqual(masterPod.spec.containers[0].resources.limits['cpu'], minCPULimit,
+                             "Expected CPU limit {}, found {}"
+                             .format(minCPULimit, masterPod.spec.containers[0].resources.limits['cpu']))
+            self.assertEqual(masterPod.spec.containers[0].resources.limits['memory'], minMemoryLimit,
+                             "Expected memory limit {}, found {}"
+                             .format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_multi_namespace_support(self):
@@ -388,9 +464,14 @@ class EndToEndTestCase(unittest.TestCase):
             pg_manifest["metadata"]["namespace"] = self.namespace
             yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)
 
-        k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
-        k8s.wait_for_pod_start("spilo-role=master", self.namespace)
-        self.assert_master_is_unique(self.namespace, "acid-test-cluster")
+        try:
+            k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
+            k8s.wait_for_pod_start("spilo-role=master", self.namespace)
+            self.assert_master_is_unique(self.namespace, "acid-test-cluster")
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_node_readiness_label(self):
@@ -402,40 +483,45 @@ class EndToEndTestCase(unittest.TestCase):
         readiness_label = 'lifecycle-status'
         readiness_value = 'ready'
 
-        # get nodes of master and replica(s) (expected target of new master)
-        current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
-        num_replicas = len(current_replica_nodes)
-        failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)
-
-        # add node_readiness_label to potential failover nodes
-        patch_readiness_label = {
-            "metadata": {
-                "labels": {
-                    readiness_label: readiness_value
-                }
-            }
-        }
-        for failover_target in failover_targets:
-            k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)
-
-        # define node_readiness_label in config map which should trigger a failover of the master
-        patch_readiness_label_config = {
-            "data": {
-                "node_readiness_label": readiness_label + ':' + readiness_value,
-            }
-        }
-        k8s.update_config(patch_readiness_label_config)
-        new_master_node, new_replica_nodes = self.assert_failover(
-            current_master_node, num_replicas, failover_targets, cluster_label)
-
-        # patch also node where master ran before
-        k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
-
-        # wait a little before proceeding with the pod distribution test
-        time.sleep(30)
-
-        # toggle pod anti affinity to move replica away from master node
-        self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
+        try:
+            # get nodes of master and replica(s) (expected target of new master)
+            current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
+            num_replicas = len(current_replica_nodes)
+            failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)
+
+            # add node_readiness_label to potential failover nodes
+            patch_readiness_label = {
+                "metadata": {
+                    "labels": {
+                        readiness_label: readiness_value
+                    }
+                }
+            }
+            for failover_target in failover_targets:
+                k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)
+
+            # define node_readiness_label in config map which should trigger a failover of the master
+            patch_readiness_label_config = {
+                "data": {
+                    "node_readiness_label": readiness_label + ':' + readiness_value,
+                }
+            }
+            k8s.update_config(patch_readiness_label_config)
+            new_master_node, new_replica_nodes = self.assert_failover(
+                current_master_node, num_replicas, failover_targets, cluster_label)
+
+            # patch also node where master ran before
+            k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
+
+            # wait a little before proceeding with the pod distribution test
+            time.sleep(30)
+
+            # toggle pod anti affinity to move replica away from master node
+            self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_scaling(self):
@@ -445,13 +531,18 @@ class EndToEndTestCase(unittest.TestCase):
         k8s = self.k8s
         labels = "application=spilo,cluster-name=acid-minimal-cluster"
 
-        k8s.wait_for_pg_to_scale(3)
-        self.assertEqual(3, k8s.count_pods_with_label(labels))
-        self.assert_master_is_unique()
-
-        k8s.wait_for_pg_to_scale(2)
-        self.assertEqual(2, k8s.count_pods_with_label(labels))
-        self.assert_master_is_unique()
+        try:
+            k8s.wait_for_pg_to_scale(3)
+            self.assertEqual(3, k8s.count_pods_with_label(labels))
+            self.assert_master_is_unique()
+
+            k8s.wait_for_pg_to_scale(2)
+            self.assertEqual(2, k8s.count_pods_with_label(labels))
+            self.assert_master_is_unique()
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_service_annotations(self):
@@ -466,27 +557,32 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_custom_service_annotations)
 
-        pg_patch_custom_annotations = {
-            "spec": {
-                "serviceAnnotations": {
-                    "annotation.key": "value",
-                    "foo": "bar",
-                }
-            }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)
-
-        # wait a little before proceeding
-        time.sleep(30)
-        annotations = {
-            "annotation.key": "value",
-            "foo": "bar",
-        }
-        self.assertTrue(k8s.check_service_annotations(
-            "cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
-        self.assertTrue(k8s.check_service_annotations(
-            "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))
+        try:
+            pg_patch_custom_annotations = {
+                "spec": {
+                    "serviceAnnotations": {
+                        "annotation.key": "value",
+                        "foo": "bar",
+                    }
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)
+
+            # wait a little before proceeding
+            time.sleep(30)
+            annotations = {
+                "annotation.key": "value",
+                "foo": "bar",
+            }
+            self.assertTrue(k8s.check_service_annotations(
+                "cluster-name=acid-minimal-cluster,spilo-role=master", annotations))
+            self.assertTrue(k8s.check_service_annotations(
+                "cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
         # clean up
         unpatch_custom_service_annotations = {
@@ -511,24 +607,29 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_sset_propagate_annotations)
 
-        pg_crd_annotations = {
-            "metadata": {
-                "annotations": {
-                    "deployment-time": "2020-04-30 12:00:00",
-                    "downscaler/downtime_replicas": "0",
-                },
-            }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
-
-        # wait a little before proceeding
-        time.sleep(60)
-        annotations = {
-            "deployment-time": "2020-04-30 12:00:00",
-            "downscaler/downtime_replicas": "0",
-        }
-        self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))
+        try:
+            pg_crd_annotations = {
+                "metadata": {
+                    "annotations": {
+                        "deployment-time": "2020-04-30 12:00:00",
+                        "downscaler/downtime_replicas": "0",
+                    },
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
+
+            # wait a little before proceeding
+            time.sleep(60)
+            annotations = {
+                "deployment-time": "2020-04-30 12:00:00",
+                "downscaler/downtime_replicas": "0",
+            }
+            self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_taint_based_eviction(self):
@@ -555,65 +656,29 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
 
-        # patch node and test if master is failing over to one of the expected nodes
-        k8s.api.core_v1.patch_node(current_master_node, body)
-        new_master_node, new_replica_nodes = self.assert_failover(
-            current_master_node, num_replicas, failover_targets, cluster_label)
-
-        # add toleration to pods
-        patch_toleration_config = {
-            "data": {
-                "toleration": "key:postgres,operator:Exists,effect:NoExecute"
-            }
-        }
-        k8s.update_config(patch_toleration_config)
-
-        # wait a little before proceeding with the pod distribution test
-        time.sleep(30)
-
-        # toggle pod anti affinity to move replica away from master node
-        self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
-
-    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_infrastructure_roles(self):
-        '''
-            Test using external secrets for infrastructure roles
-        '''
-        k8s = self.k8s
-        # update infrastructure roles description
-        secret_name = "postgresql-infrastructure-roles"
-        roles = "secretname: postgresql-infrastructure-roles-new, userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon"
-        patch_infrastructure_roles = {
-            "data": {
-                "infrastructure_roles_secret_name": secret_name,
-                "infrastructure_roles_secrets": roles,
-            },
-        }
-        k8s.update_config(patch_infrastructure_roles)
-
-        # wait a little before proceeding
-        time.sleep(30)
-
-        # check that new roles are represented in the config by requesting the
-        # operator configuration via API
-        operator_pod = k8s.get_operator_pod()
-        get_config_cmd = "wget --quiet -O - localhost:8080/config"
-        result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
-        roles_dict = (json.loads(result.stdout)
-                      .get("controller", {})
-                      .get("InfrastructureRoles"))
-
-        self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
-        role = roles_dict["robot_zmon_acid_monitoring_new"]
-        role.pop("Password", None)
-        self.assertDictEqual(role, {
-            "Name": "robot_zmon_acid_monitoring_new",
-            "Flags": None,
-            "MemberOf": ["robot_zmon"],
-            "Parameters": None,
-            "AdminRole": "",
-            "Origin": 2,
-        })
+        try:
+            # patch node and test if master is failing over to one of the expected nodes
+            k8s.api.core_v1.patch_node(current_master_node, body)
+            new_master_node, new_replica_nodes = self.assert_failover(
+                current_master_node, num_replicas, failover_targets, cluster_label)
+
+            # add toleration to pods
+            patch_toleration_config = {
+                "data": {
+                    "toleration": "key:postgres,operator:Exists,effect:NoExecute"
+                }
+            }
+            k8s.update_config(patch_toleration_config)
+
+            # wait a little before proceeding with the pod distribution test
+            time.sleep(30)
+
+            # toggle pod anti affinity to move replica away from master node
+            self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_x_cluster_deletion(self):
@@ -632,65 +697,71 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_delete_annotations)
 
-        # this delete attempt should be omitted because of missing annotations
-        k8s.api.custom_objects_api.delete_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
-
-        # check that pods and services are still there
-        k8s.wait_for_running_pods(cluster_label, 2)
-        k8s.wait_for_service(cluster_label)
-
-        # recreate Postgres cluster resource
-        k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
-
-        # wait a little before proceeding
-        time.sleep(10)
-
-        # add annotations to manifest
-        deleteDate = datetime.today().strftime('%Y-%m-%d')
-        pg_patch_delete_annotations = {
-            "metadata": {
-                "annotations": {
-                    "delete-date": deleteDate,
-                    "delete-clustername": "acid-minimal-cluster",
-                }
-            }
-        }
-        k8s.api.custom_objects_api.patch_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
-
-        # wait a little before proceeding
-        time.sleep(10)
-        k8s.wait_for_running_pods(cluster_label, 2)
-        k8s.wait_for_service(cluster_label)
-
-        # now delete process should be triggered
-        k8s.api.custom_objects_api.delete_namespaced_custom_object(
-            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
-
-        # wait until cluster is deleted
-        time.sleep(120)
-
-        # check if everything has been deleted
-        self.assertEqual(0, k8s.count_pods_with_label(cluster_label))
-        self.assertEqual(0, k8s.count_services_with_label(cluster_label))
-        self.assertEqual(0, k8s.count_endpoints_with_label(cluster_label))
-        self.assertEqual(0, k8s.count_statefulsets_with_label(cluster_label))
-        self.assertEqual(0, k8s.count_deployments_with_label(cluster_label))
-        self.assertEqual(0, k8s.count_pdbs_with_label(cluster_label))
-        self.assertEqual(0, k8s.count_secrets_with_label(cluster_label))
+        try:
+            # this delete attempt should be omitted because of missing annotations
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
+
+            # check that pods and services are still there
+            k8s.wait_for_running_pods(cluster_label, 2)
+            k8s.wait_for_service(cluster_label)
+
+            # recreate Postgres cluster resource
+            k8s.create_with_kubectl("manifests/minimal-postgres-manifest.yaml")
+
+            # wait a little before proceeding
+            time.sleep(10)
+
+            # add annotations to manifest
+            delete_date = datetime.today().strftime('%Y-%m-%d')
+            pg_patch_delete_annotations = {
+                "metadata": {
+                    "annotations": {
+                        "delete-date": delete_date,
+                        "delete-clustername": "acid-minimal-cluster",
+                    }
+                }
+            }
+            k8s.api.custom_objects_api.patch_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_delete_annotations)
+
+            # wait a little before proceeding
+            time.sleep(10)
+            k8s.wait_for_running_pods(cluster_label, 2)
+            k8s.wait_for_service(cluster_label)
+
+            # now delete process should be triggered
+            k8s.api.custom_objects_api.delete_namespaced_custom_object(
+                "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
+
+            # wait until cluster is deleted
+            time.sleep(120)
+
+            # check if everything has been deleted
+            self.assertEqual(0, k8s.count_pods_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_services_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_endpoints_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_statefulsets_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_deployments_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_pdbs_with_label(cluster_label))
+            self.assertEqual(0, k8s.count_secrets_with_label(cluster_label))
+        except timeout_decorator.TimeoutError:
+            print('Operator log: {}'.format(k8s.get_operator_log()))
+            raise
 
     def get_failover_targets(self, master_node, replica_nodes):
         '''
           If all pods live on the same node, failover will happen to other worker(s)
         '''
         k8s = self.k8s
+        k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'
 
         failover_targets = [x for x in replica_nodes if x != master_node]
         if len(failover_targets) == 0:
-            nodes = k8s.api.core_v1.list_node()
+            nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
             for n in nodes.items:
-                if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node:
+                if n.metadata.name != master_node:
                     failover_targets.append(n.metadata.name)
 
         return failover_targets
@@ -738,8 +809,7 @@ class EndToEndTestCase(unittest.TestCase):
             }
         }
         k8s.update_config(patch_enable_antiaffinity)
-        self.assert_failover(
-            master_node, len(replica_nodes), failover_targets, cluster_label)
+        self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)
 
         # now disable pod anti affintiy again which will cause yet another failover
         patch_disable_antiaffinity = {
@@ -767,6 +837,7 @@ class K8sApi:
         self.batch_v1_beta1 = client.BatchV1beta1Api()
         self.custom_objects_api = client.CustomObjectsApi()
         self.policy_v1_beta1 = client.PolicyV1beta1Api()
+        self.storage_v1_api = client.StorageV1Api()
 
 
 class K8s:
@@ -944,8 +1015,8 @@ class K8s:
     def exec_with_kubectl(self, pod, cmd):
         return subprocess.run(["./exec.sh", pod, cmd],
                               stdout=subprocess.PIPE,
                               stderr=subprocess.PIPE)
 
     def get_effective_pod_image(self, pod_name, namespace='default'):
         '''

go.mod

@@ -10,12 +10,11 @@ require (
 	github.com/sirupsen/logrus v1.6.0
 	github.com/stretchr/testify v1.5.1
 	golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
-	golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab // indirect
 	gopkg.in/yaml.v2 v2.2.8
 	k8s.io/api v0.18.8
 	k8s.io/apiextensions-apiserver v0.18.0
 	k8s.io/apimachinery v0.18.8
-	k8s.io/client-go v0.18.6
+	k8s.io/client-go v0.18.8
 	k8s.io/code-generator v0.18.8
 )

go.sum

@ -137,7 +137,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
-github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@ -146,7 +145,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
-github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -156,7 +154,6 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
-github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
@ -181,7 +178,6 @@ github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
-github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@ -272,7 +268,6 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
-github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@ -281,7 +276,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
-github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
@ -293,7 +287,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
-github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@ -339,8 +333,8 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
-golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
+golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@ -375,7 +369,6 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -393,12 +386,10 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
-golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06 h1:ChBCbOHeLqK+j+znGPlWCcvx/t2PdxmyPBheVZxXbcc=
-golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
+golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab h1:CyH2SDm5ATQiX9gtbMYfvNNed97A9v+TJFnUX/fTaJY=
+golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
-golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@ -431,7 +422,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
-gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -441,20 +431,17 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
-k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4=
k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
-k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
-k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
-k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
-k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
+k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM=
+k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.8 h1:lgO1P1wjikEtzNvj7ia+x1VC4svJ28a/r0wnOLhhOTU=
k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
@ -475,7 +462,6 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
-sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
@ -69,6 +69,8 @@ spec:
  #   name: my-config-map
  enableShmVolume: true
+ # spiloRunAsUser: 101
+ # spiloRunAsGroup: 103
  # spiloFSGroup: 103
  # podAnnotations:
  #   annotation.key: value
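For orientation, a minimal sketch (not the operator's exact code) of how the two new manifest options, together with spiloFSGroup, end up on the pod: each is a nullable integer, and only non-nil values are written into the pod's security context, so unset options keep the image defaults (see the generatePodTemplate changes further down). The helper name buildSecurityContext is invented for illustration; compiling this requires the k8s.io/api module.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// buildSecurityContext mirrors the nil-pointer convention used by the
// operator: a nil pointer means "not configured", so the field stays unset.
func buildSecurityContext(runAsUser, runAsGroup, fsGroup *int64) v1.PodSecurityContext {
	sc := v1.PodSecurityContext{}
	if runAsUser != nil {
		sc.RunAsUser = runAsUser
	}
	if runAsGroup != nil {
		sc.RunAsGroup = runAsGroup
	}
	if fsGroup != nil {
		sc.FSGroup = fsGroup
	}
	return sc
}

func main() {
	uid, gid := int64(101), int64(103)
	fmt.Printf("%+v\n", buildSecurityContext(&uid, &gid, &gid))
}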
@ -15,7 +15,7 @@ data:
  # connection_pooler_default_cpu_request: "500m"
  # connection_pooler_default_memory_limit: 100Mi
  # connection_pooler_default_memory_request: 100Mi
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-11"
  # connection_pooler_max_db_connections: 60
  # connection_pooler_mode: "transaction"
  # connection_pooler_number_of_instances: 2
@ -31,7 +31,7 @@ data:
  # default_memory_request: 100Mi
  # delete_annotation_date_key: delete-date
  # delete_annotation_name_key: delete-clustername
-  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
+  docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p5
  # downscaler_annotations: "deployment-time,downscaler/*"
  # enable_admin_role_for_users: "true"
  # enable_crd_validation: "true"
@ -47,6 +47,7 @@ data:
  # enable_team_superuser: "false"
  enable_teams_api: "false"
  # etcd_host: ""
+  external_traffic_policy: "Cluster"
  # gcp_credentials: ""
  # kubernetes_use_configmaps: "false"
  # infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
@ -80,6 +81,7 @@ data:
  # pod_environment_secret: "my-custom-secret"
  pod_label_wait_timeout: 10m
  pod_management_policy: "ordered_ready"
+  # pod_priority_class_name: "postgres-pod-priority"
  pod_role_label: spilo-role
  # pod_service_account_definition: ""
  pod_service_account_name: "postgres-pod"
@ -99,6 +101,8 @@ data:
  secret_name_template: "{username}.{cluster}.credentials"
  # sidecar_docker_images: ""
  # set_memory_request_to_limit: "false"
+  # spilo_runasuser: 101
+  # spilo_runasgroup: 103
  # spilo_fsgroup: 103
  spilo_privileged: "false"
  # storage_resize_mode: "off"
@ -0,0 +1,8 @@
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  namespace: kube-system
+  name: standard
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+provisioner: kubernetes.io/host-path
@ -196,6 +196,10 @@ spec:
            type: string
          secret_name_template:
            type: string
+         spilo_runasuser:
+           type: integer
+         spilo_runasgroup:
+           type: integer
          spilo_fsgroup:
            type: integer
          spilo_privileged:
@ -261,6 +265,11 @@ spec:
            type: boolean
          enable_replica_load_balancer:
            type: boolean
+         external_traffic_policy:
+           type: string
+           enum:
+             - "Cluster"
+             - "Local"
          master_dns_name_format:
            type: string
          replica_dns_name_format:
@ -0,0 +1,11 @@
+apiVersion: scheduling.k8s.io/v1
+description: 'This priority class must be used only for databases controlled by the
+  Postgres operator'
+kind: PriorityClass
+metadata:
+  labels:
+    application: postgres-operator
+  name: postgres-pod-priority
+preemptionPolicy: PreemptLowerPriority
+globalDefault: false
+value: 1000000
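As a usage note: the class only takes effect when pods reference it by name, and a PriorityClass's value is immutable in Kubernetes, which is why the statefulset comparison logic further down assumes a priority change arrives as a new class name rather than an in-place edit. A hedged sketch of the relevant pod field (illustration only; requires the k8s.io/api module):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

func main() {
	// Pods opt into the priority defined above by naming the class;
	// the scheduler resolves the name to the numeric value (1000000).
	podSpec := v1.PodSpec{
		PriorityClassName: "postgres-pod-priority",
	}
	fmt.Println(podSpec.PriorityClassName)
}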
@ -61,13 +61,15 @@ configuration:
    # pod_environment_configmap: "default/my-custom-config"
    # pod_environment_secret: "my-custom-secret"
    pod_management_policy: "ordered_ready"
-   # pod_priority_class_name: ""
+   # pod_priority_class_name: "postgres-pod-priority"
    pod_role_label: spilo-role
    # pod_service_account_definition: ""
    pod_service_account_name: postgres-pod
    # pod_service_account_role_binding_definition: ""
    pod_terminate_grace_period: 5m
    secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+   # spilo_runasuser: 101
+   # spilo_runasgroup: 103
    # spilo_fsgroup: 103
    spilo_privileged: false
    storage_resize_mode: ebs
@ -88,12 +90,13 @@ configuration:
    resource_check_interval: 3s
    resource_check_timeout: 10m
  load_balancer:
-   # db_hosted_zone: ""
-   enable_master_load_balancer: false
-   enable_replica_load_balancer: false
    # custom_service_annotations:
    #   keyx: valuex
    #   keyy: valuey
+   # db_hosted_zone: ""
+   enable_master_load_balancer: false
+   enable_replica_load_balancer: false
+   external_traffic_policy: "Cluster"
    master_dns_name_format: "{cluster}.{team}.{hostedzone}"
    replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
  aws_or_gcp:
@ -372,6 +372,10 @@ spec:
            items:
              type: object
              additionalProperties: true
+           spiloRunAsUser:
+             type: integer
+           spiloRunAsGroup:
+             type: integer
            spiloFSGroup:
              type: integer
            standby:
@ -522,6 +522,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
				},
			},
		},
+		"spiloRunAsUser": {
+			Type: "integer",
+		},
+		"spiloRunAsGroup": {
+			Type: "integer",
+		},
		"spiloFSGroup": {
			Type: "integer",
		},
@ -1021,6 +1027,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
		"secret_name_template": {
			Type: "string",
		},
+		"spilo_runasuser": {
+			Type: "integer",
+		},
+		"spilo_runasgroup": {
+			Type: "integer",
+		},
		"spilo_fsgroup": {
			Type: "integer",
		},
@ -1126,6 +1138,17 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
		"enable_replica_load_balancer": {
			Type: "boolean",
		},
+		"external_traffic_policy": {
+			Type: "string",
+			Enum: []apiextv1beta1.JSON{
+				{
+					Raw: []byte(`"Cluster"`),
+				},
+				{
+					Raw: []byte(`"Local"`),
+				},
+			},
+		},
		"master_dns_name_format": {
			Type: "string",
		},
@ -49,6 +49,8 @@ type KubernetesMetaConfiguration struct {
	PodServiceAccountRoleBindingDefinition string   `json:"pod_service_account_role_binding_definition,omitempty"`
	PodTerminateGracePeriod                Duration `json:"pod_terminate_grace_period,omitempty"`
	SpiloPrivileged                        bool     `json:"spilo_privileged,omitempty"`
+	SpiloRunAsUser                         *int64   `json:"spilo_runasuser,omitempty"`
+	SpiloRunAsGroup                        *int64   `json:"spilo_runasgroup,omitempty"`
	SpiloFSGroup                           *int64   `json:"spilo_fsgroup,omitempty"`
	WatchedNamespace                       string   `json:"watched_namespace,omitempty"`
	PDBNameFormat                          config.StringTemplate `json:"pdb_name_format,omitempty"`
@ -109,6 +111,7 @@ type LoadBalancerConfiguration struct {
	CustomServiceAnnotations map[string]string     `json:"custom_service_annotations,omitempty"`
	MasterDNSNameFormat      config.StringTemplate `json:"master_dns_name_format,omitempty"`
	ReplicaDNSNameFormat     config.StringTemplate `json:"replica_dns_name_format,omitempty"`
+	ExternalTrafficPolicy    string                `json:"external_traffic_policy" default:"Cluster"`
}

// AWSGCPConfiguration defines the configuration for AWS
@ -190,20 +193,19 @@ type OperatorLogicalBackupConfiguration struct {

// OperatorConfigurationData defines the operation config
type OperatorConfigurationData struct {
	EnableCRDValidation     *bool    `json:"enable_crd_validation,omitempty"`
	EnableLazySpiloUpgrade  bool     `json:"enable_lazy_spilo_upgrade,omitempty"`
	EtcdHost                string   `json:"etcd_host,omitempty"`
	KubernetesUseConfigMaps bool     `json:"kubernetes_use_configmaps,omitempty"`
	DockerImage             string   `json:"docker_image,omitempty"`
	Workers                 uint32   `json:"workers,omitempty"`
	MinInstances            int32    `json:"min_instances,omitempty"`
	MaxInstances            int32    `json:"max_instances,omitempty"`
	ResyncPeriod            Duration `json:"resync_period,omitempty"`
	RepairPeriod            Duration `json:"repair_period,omitempty"`
	SetMemoryRequestToLimit bool     `json:"set_memory_request_to_limit,omitempty"`
	ShmVolume               *bool    `json:"enable_shm_volume,omitempty"`
-	// deprecated in favour of SidecarContainers
-	SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"`
+	SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
	SidecarContainers          []v1.Container              `json:"sidecars,omitempty"`
	PostgresUsersConfiguration PostgresUsersConfiguration  `json:"users"`
	Kubernetes                 KubernetesMetaConfiguration `json:"kubernetes"`
@ -36,7 +36,9 @@ type PostgresSpec struct {
	TeamID      string `json:"teamId"`
	DockerImage string `json:"dockerImage,omitempty"`
-	SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`
+	SpiloRunAsUser  *int64 `json:"spiloRunAsUser,omitempty"`
+	SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
+	SpiloFSGroup    *int64 `json:"spiloFSGroup,omitempty"`

	// vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
	// in that case the var evaluates to nil and the value is taken from the operator config
@ -54,7 +56,7 @@ type PostgresSpec struct {
	NumberOfInstances  int32                `json:"numberOfInstances"`
	Users              map[string]UserFlags `json:"users"`
	MaintenanceWindows []MaintenanceWindow  `json:"maintenanceWindows,omitempty"`
-	Clone *CloneDescription `json:"clone"`
+	Clone *CloneDescription `json:"clone,omitempty"`
	ClusterName       string                      `json:"-"`
	Databases         map[string]string           `json:"databases,omitempty"`
	PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
@ -65,10 +67,10 @@ type PostgresSpec struct {
	ShmVolume             *bool  `json:"enableShmVolume,omitempty"`
	EnableLogicalBackup   bool   `json:"enableLogicalBackup,omitempty"`
	LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
-	StandbyCluster     *StandbyDescription `json:"standby"`
-	PodAnnotations     map[string]string   `json:"podAnnotations"`
-	ServiceAnnotations map[string]string   `json:"serviceAnnotations"`
-	TLS                *TLSDescription     `json:"tls"`
+	StandbyCluster     *StandbyDescription `json:"standby,omitempty"`
+	PodAnnotations     map[string]string   `json:"podAnnotations,omitempty"`
+	ServiceAnnotations map[string]string   `json:"serviceAnnotations,omitempty"`
+	TLS                *TLSDescription     `json:"tls,omitempty"`
	AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`

	// deprecated json tags
@ -147,6 +147,16 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
	*out = *in
+	if in.SpiloRunAsUser != nil {
+		in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SpiloRunAsGroup != nil {
+		in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
	if in.SpiloFSGroup != nil {
		in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
		*out = new(int64)
@ -527,6 +537,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
		*out = new(ConnectionPooler)
		(*in).DeepCopyInto(*out)
	}
+	if in.SpiloRunAsUser != nil {
+		in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
+		*out = new(int64)
+		**out = **in
+	}
+	if in.SpiloRunAsGroup != nil {
+		in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
+		*out = new(int64)
+		**out = **in
+	}
	if in.SpiloFSGroup != nil {
		in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
		*out = new(int64)
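The generated bodies above follow a fixed pattern for pointer fields: allocate a fresh int64 and copy the value through it. A plain struct assignment would copy only the pointer, so the copy and the original would alias the same integer. A self-contained illustration of the difference (the type name spec is invented for the example):

package main

import "fmt"

type spec struct {
	SpiloRunAsUser *int64
}

func main() {
	uid := int64(101)
	orig := spec{SpiloRunAsUser: &uid}

	shallow := orig // copies the pointer, not the value
	*shallow.SpiloRunAsUser = 0
	fmt.Println(*orig.SpiloRunAsUser) // 0 - the original changed too

	// what the generated deep copy does instead:
	deep := spec{}
	if orig.SpiloRunAsUser != nil {
		v := *orig.SpiloRunAsUser
		deep.SpiloRunAsUser = &v
	}
	*deep.SpiloRunAsUser = 42
	fmt.Println(*orig.SpiloRunAsUser) // still 0 - no aliasing
}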
@ -460,6 +460,15 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
		}
	}

+	// we assume any change in priority happens by rolling out a new priority class
+	// changing the priority value in an existing class is not supported
+	if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName {
+		match = false
+		needsReplace = true
+		needsRollUpdate = true
+		reasons = append(reasons, "new statefulset's pod priority class in spec doesn't match the current one")
+	}

	// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
	// until they are re-created for other reasons, for example node rotation
	if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
@ -561,6 +561,8 @@ func (c *Cluster) generatePodTemplate(
	initContainers []v1.Container,
	sidecarContainers []v1.Container,
	tolerationsSpec *[]v1.Toleration,
+	spiloRunAsUser *int64,
+	spiloRunAsGroup *int64,
	spiloFSGroup *int64,
	nodeAffinity *v1.Affinity,
	terminateGracePeriod int64,
@ -580,6 +582,14 @@ func (c *Cluster) generatePodTemplate(
	containers = append(containers, sidecarContainers...)

	securityContext := v1.PodSecurityContext{}
+	if spiloRunAsUser != nil {
+		securityContext.RunAsUser = spiloRunAsUser
+	}
+	if spiloRunAsGroup != nil {
+		securityContext.RunAsGroup = spiloRunAsGroup
+	}
	if spiloFSGroup != nil {
		securityContext.FSGroup = spiloFSGroup
	}
@ -1077,7 +1087,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	// pickup the docker image for the spilo container
	effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)

-	// determine the FSGroup for the spilo pod
+	// determine the User, Group and FSGroup for the spilo pod
+	effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
+	if spec.SpiloRunAsUser != nil {
+		effectiveRunAsUser = spec.SpiloRunAsUser
+	}
+
+	effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
+	if spec.SpiloRunAsGroup != nil {
+		effectiveRunAsGroup = spec.SpiloRunAsGroup
+	}
+
	effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
	if spec.SpiloFSGroup != nil {
		effectiveFSGroup = spec.SpiloFSGroup
@ -1221,6 +1241,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
		initContainers,
		sidecarContainers,
		&tolerationSpec,
+		effectiveRunAsUser,
+		effectiveRunAsGroup,
		effectiveFSGroup,
		nodeAffinity(c.OpConfig.NodeReadinessLabel),
		int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
@ -1623,6 +1645,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
		}
		c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)

+		serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
		serviceSpec.Type = v1.ServiceTypeLoadBalancer
	} else if role == Replica {
		// before PR #258, the replica service was only created if allocated a LB
@ -1900,6 +1923,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
		[]v1.Container{},
		&[]v1.Toleration{},
		nil,
+		nil,
+		nil,
		nodeAffinity(c.OpConfig.NodeReadinessLabel),
		int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
		c.OpConfig.PodServiceAccountName,
@ -1313,6 +1313,8 @@ func TestTLS(t *testing.T) {
	var err error
	var spec acidv1.PostgresSpec
	var cluster *Cluster
+	var spiloRunAsUser = int64(101)
+	var spiloRunAsGroup = int64(103)
	var spiloFSGroup = int64(103)
	var additionalVolumes = spec.AdditionalVolumes
@ -1340,7 +1342,9 @@ func TestTLS(t *testing.T) {
				ReplicationUsername: replicationUserName,
			},
			Resources: config.Resources{
-				SpiloFSGroup: &spiloFSGroup,
+				SpiloRunAsUser:  &spiloRunAsUser,
+				SpiloRunAsGroup: &spiloRunAsGroup,
+				SpiloFSGroup:    &spiloFSGroup,
			},
		},
	}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
@ -1753,3 +1757,83 @@ func TestSidecars(t *testing.T) {
	})
}
+func TestGenerateService(t *testing.T) {
+	var spec acidv1.PostgresSpec
+	var cluster *Cluster
+	var enableLB bool = true
+	spec = acidv1.PostgresSpec{
+		TeamID: "myapp", NumberOfInstances: 1,
+		Resources: acidv1.Resources{
+			ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+		},
+		Volume: acidv1.Volume{
+			Size: "1G",
+		},
+		Sidecars: []acidv1.Sidecar{
+			acidv1.Sidecar{
+				Name: "cluster-specific-sidecar",
+			},
+			acidv1.Sidecar{
+				Name: "cluster-specific-sidecar-with-resources",
+				Resources: acidv1.Resources{
+					ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
+					ResourceLimits:   acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
+				},
+			},
+			acidv1.Sidecar{
+				Name:        "replace-sidecar",
+				DockerImage: "overwrite-image",
+			},
+		},
+		EnableMasterLoadBalancer: &enableLB,
+	}
+	cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				ProtectedRoles:      []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+				Resources: config.Resources{
+					DefaultCPURequest:    "200m",
+					DefaultCPULimit:      "500m",
+					DefaultMemoryRequest: "0.7Gi",
+					DefaultMemoryLimit:   "1.3Gi",
+				},
+				SidecarImages: map[string]string{
+					"deprecated-global-sidecar": "image:123",
+				},
+				SidecarContainers: []v1.Container{
+					v1.Container{
+						Name: "global-sidecar",
+					},
+					// will be replaced by a cluster specific sidecar with the same name
+					v1.Container{
+						Name:  "replace-sidecar",
+						Image: "replaced-image",
+					},
+				},
+				Scalyr: config.Scalyr{
+					ScalyrAPIKey:        "abc",
+					ScalyrImage:         "scalyr-image",
+					ScalyrCPURequest:    "220m",
+					ScalyrCPULimit:      "520m",
+					ScalyrMemoryRequest: "0.9Gi",
+					// use default memory limit
+				},
+				ExternalTrafficPolicy: "Cluster",
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+
+	service := cluster.generateService(Master, &spec)
+	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy)
+	cluster.OpConfig.ExternalTrafficPolicy = "Local"
+	service = cluster.generateService(Master, &spec)
+	assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)
+}
@ -696,12 +696,8 @@ func (c *Cluster) syncPreparedDatabases() error {
		if err := c.initDbConnWithName(preparedDbName); err != nil {
			return fmt.Errorf("could not init connection to database %s: %v", preparedDbName, err)
		}
-		defer func() {
-			if err := c.closeDbConn(); err != nil {
-				c.logger.Errorf("could not close database connection: %v", err)
-			}
-		}()
+		c.logger.Debugf("syncing prepared database %q", preparedDbName)

		// now, prepare defined schemas
		preparedSchemas := preparedDB.PreparedSchemas
		if len(preparedDB.PreparedSchemas) == 0 {
@ -715,6 +711,10 @@ func (c *Cluster) syncPreparedDatabases() error {
		if err := c.syncExtensions(preparedDB.Extensions); err != nil {
			return err
		}
+
+		if err := c.closeDbConn(); err != nil {
+			c.logger.Errorf("could not close database connection: %v", err)
+		}
	}

	return nil
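The reshuffling here fixes a classic Go pitfall: a defer registered inside a loop does not run at the end of the iteration but only when the whole function returns, so with many prepared databases every connection would have stayed open until syncPreparedDatabases finished. A standalone illustration of the difference (conn is a toy type standing in for the database connection):

package main

import "fmt"

type conn struct{ name string }

func (c *conn) Close() { fmt.Println("closed", c.name) }

// leaky: all Closes pile up and run only at function exit.
func leaky(names []string) {
	for _, n := range names {
		c := &conn{n}
		defer c.Close() // runs when leaky returns, not at loop end
		fmt.Println("using", c.name)
	}
	fmt.Println("function body done") // both conns still open here
}

// fixed: close inside each iteration, as the commit now does.
func fixed(names []string) {
	for _, n := range names {
		c := &conn{n}
		fmt.Println("using", c.name)
		c.Close()
	}
}

func main() {
	leaky([]string{"db1", "db2"})
	fixed([]string{"db1", "db2"})
}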
@ -61,6 +61,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret
	result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m")
	result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
+	result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
+	result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
	result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
	result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
	result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace
@ -124,6 +126,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations
	result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
	result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat
+	result.ExternalTrafficPolicy = util.Coalesce(fromCRD.LoadBalancer.ExternalTrafficPolicy, "Cluster")

	// AWS or GCP config
	result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket
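A sketch of the fallback pattern used here, assuming util.Coalesce returns its first non-empty argument (the function itself is not part of this diff); an empty external_traffic_policy in the CRD then resolves to the default "Cluster":

package main

import "fmt"

// coalesce returns the first non-empty string - the assumed semantics
// of util.Coalesce, shown here only to make the fallback explicit.
func coalesce(values ...string) string {
	for _, v := range values {
		if v != "" {
			return v
		}
	}
	return ""
}

func main() {
	fmt.Println(coalesce("", "Cluster"))      // empty CRD value -> "Cluster"
	fmt.Println(coalesce("Local", "Cluster")) // explicit value wins
}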
@ -28,6 +28,8 @@ type Resources struct {
	PodLabelWaitTimeout     time.Duration `name:"pod_label_wait_timeout" default:"10m"`
	PodDeletionWaitTimeout  time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
	PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
+	SpiloRunAsUser  *int64 `json:"spilo_runasuser,omitempty"`
+	SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
	SpiloFSGroup         *int64 `name:"spilo_fsgroup"`
	PodPriorityClassName string `name:"pod_priority_class_name"`
	ClusterDomain        string `name:"cluster_domain" default:"cluster.local"`
@ -141,14 +143,13 @@ type Config struct {
	LogicalBackup
	ConnectionPooler
	WatchedNamespace        string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
	KubernetesUseConfigMaps bool   `name:"kubernetes_use_configmaps" default:"false"`
	EtcdHost                string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
	DockerImage             string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p3"`
-	// deprecated in favour of SidecarContainers
-	SidecarImages map[string]string `name:"sidecar_docker_images"`
+	SidecarImages         map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
	SidecarContainers     []v1.Container    `name:"sidecars"`
	PodServiceAccountName string            `name:"pod_service_account_name" default:"postgres-pod"`
	// value of this string must be valid JSON or YAML; see initPodServiceAccount
	PodServiceAccountDefinition            string `name:"pod_service_account_definition" default:""`
	PodServiceAccountRoleBindingDefinition string `name:"pod_service_account_role_binding_definition" default:""`
@ -175,25 +176,25 @@ type Config struct {
	EnablePodAntiAffinity      bool   `name:"enable_pod_antiaffinity" default:"false"`
	PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
	StorageResizeMode          string `name:"storage_resize_mode" default:"ebs"`
-	// deprecated and kept for backward compatibility
-	EnableLoadBalancer *bool `name:"enable_load_balancer"`
+	EnableLoadBalancer    *bool  `name:"enable_load_balancer"` // deprecated and kept for backward compatibility
+	ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"`
	MasterDNSNameFormat       StringTemplate    `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
	ReplicaDNSNameFormat      StringTemplate    `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
	PDBNameFormat             StringTemplate    `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`
	EnablePodDisruptionBudget *bool             `name:"enable_pod_disruption_budget" default:"true"`
	EnableInitContainers      *bool             `name:"enable_init_containers" default:"true"`
	EnableSidecars            *bool             `name:"enable_sidecars" default:"true"`
	Workers                   uint32            `name:"workers" default:"8"`
	APIPort                   int               `name:"api_port" default:"8080"`
	RingLogLines              int               `name:"ring_log_lines" default:"100"`
	ClusterHistoryEntries     int               `name:"cluster_history_entries" default:"1000"`
	TeamAPIRoleConfiguration  map[string]string `name:"team_api_role_configuration" default:"log_statement:all"`
	PodTerminateGracePeriod   time.Duration     `name:"pod_terminate_grace_period" default:"5m"`
	PodManagementPolicy       string            `name:"pod_management_policy" default:"ordered_ready"`
	ProtectedRoles            []string          `name:"protected_role_names" default:"admin"`
	PostgresSuperuserTeams    []string          `name:"postgres_superuser_teams" default:""`
	SetMemoryRequestToLimit   bool              `name:"set_memory_request_to_limit" default:"false"`
	EnableLazySpiloUpgrade    bool              `name:"enable_lazy_spilo_upgrade" default:"false"`
}
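The struct relies on `name:` and `default:` tags; a toy illustration of how such tags can be read with reflection follows (the operator's actual config loader is not part of this diff, so this only demonstrates the mechanism):

package main

import (
	"fmt"
	"reflect"
)

type resources struct {
	ClusterDomain string `name:"cluster_domain" default:"cluster.local"`
}

func main() {
	// Look up the field and read its tags; a loader can fall back to
	// the "default" tag whenever no explicit value was provided.
	f, _ := reflect.TypeOf(resources{}).FieldByName("ClusterDomain")
	fmt.Println(f.Tag.Get("name"), "=>", f.Tag.Get("default"))
}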
// MustMarshal marshals the config or panics