Merge branch 'master' into replica-pooler
commit 86e6a51fa9
@@ -0,0 +1,19 @@
---
name: Postgres Operator issue template
about: How are you using the operator?
title: ''
labels: ''
assignees: ''

---

Please, answer some short questions which should help us to understand your problem / question better?

- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.5.0
- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
- **Are you running Postgres Operator in production?** [yes | no]
- **Type of issue?** [Bug report, question, feature request, etc.]

Some general remarks when posting a bug report:
- Please, check the operator, pod (Patroni) and postgresql logs first. When copy-pasting many log lines please do it in a separate GitHub gist together with your Postgres CRD and configuration manifest.
- If you feel this issue might be more related to the [Spilo](https://github.com/zalando/spilo/issues) docker image or [Patroni](https://github.com/zalando/patroni/issues), consider opening issues in the respective repos.
Makefile
@@ -97,4 +97,4 @@ test:
GO111MODULE=on go test ./...

e2e: docker # build operator image to be tested
cd e2e; make tools e2etest clean
cd e2e; make e2etest
@@ -76,12 +76,6 @@ There is a browser-friendly version of this documentation at
* [Postgres manifest reference](docs/reference/cluster_manifest.md)
* [Command-line options and environment variables](docs/reference/command_line_and_environment.md)

## Google Summer of Code

The Postgres Operator made it to the [Google Summer of Code 2019](https://summerofcode.withgoogle.com/organizations/5429926902104064/)!
Check [our ideas](docs/gsoc-2019/ideas.md#google-summer-of-code-2019)
and start discussions in [the issue tracker](https://github.com/zalando/postgres-operator/issues).

## Community

There are two places to get in touch with the community:
@@ -200,6 +200,10 @@ spec:
type: string
secret_name_template:
type: string
spilo_runasuser:
type: integer
spilo_runasgroup:
type: integer
spilo_fsgroup:
type: integer
spilo_privileged:
@@ -259,6 +263,11 @@ spec:
type: boolean
enable_replica_load_balancer:
type: boolean
external_traffic_policy:
type: string
enum:
- "Cluster"
- "Local"
master_dns_name_format:
type: string
replica_dns_name_format:
@@ -376,6 +376,10 @@ spec:
items:
type: object
additionalProperties: true
spiloRunAsUser:
type: integer
spiloRunAsGroup:
type: integer
spiloFSGroup:
type: integer
standby:
@@ -9,6 +9,9 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
data:
{{- if .Values.podPriorityClassName }}
pod_priority_class_name: {{ .Values.podPriorityClassName }}
{{- end }}
pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
{{ toYaml .Values.configGeneral | indent 2 }}
{{ toYaml .Values.configUsers | indent 2 }}
@@ -13,6 +13,9 @@ configuration:
users:
{{ toYaml .Values.configUsers | indent 4 }}
kubernetes:
{{- if .Values.podPriorityClassName }}
pod_priority_class_name: {{ .Values.podPriorityClassName }}
{{- end }}
pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
{{ toYaml .Values.configKubernetes | indent 4 }}
@@ -0,0 +1,15 @@
{{- if .Values.podPriorityClassName }}
apiVersion: scheduling.k8s.io/v1
description: 'Use only for databases controlled by Postgres operator'
kind: PriorityClass
metadata:
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ .Values.podPriorityClassName }}
preemptionPolicy: PreemptLowerPriority
globalDefault: false
value: 1000000
{{- end }}
@@ -127,6 +127,9 @@ configKubernetes:
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# set user and group for the spilo container (required to run Spilo as non-root process)
# spilo_runasuser: "101"
# spilo_runasgroup: "103"
# group ID with write-access to volumes (required to run Spilo as non-root process)
# spilo_fsgroup: 103

@@ -180,6 +183,8 @@ configLoadBalancer:
enable_master_load_balancer: false
# toggles service type load balancer pointing to the replica pod of the cluster
enable_replica_load_balancer: false
# define external traffic policy for the load balancer
external_traffic_policy: "Cluster"
# defines the DNS name string template for the master load balancer cluster
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
# defines the DNS name string template for the replica load balancer cluster
@@ -315,8 +320,12 @@ podServiceAccount:
# If not set a name is generated using the fullname template and "-pod" suffix
name: "postgres-pod"

# priority class for operator pod
priorityClassName: ""

# priority class for database pods
podPriorityClassName: ""

resources:
limits:
cpu: 500m
@@ -118,6 +118,9 @@ configKubernetes:
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# set user and group for the spilo container (required to run Spilo as non-root process)
# spilo_runasuser: "101"
# spilo_runasgroup: "103"
# group ID with write-access to volumes (required to run Spilo as non-root process)
# spilo_fsgroup: "103"

@@ -169,6 +172,8 @@ configLoadBalancer:
enable_master_load_balancer: "false"
# toggles service type load balancer pointing to the replica pod of the cluster
enable_replica_load_balancer: "false"
# define external traffic policy for the load balancer
external_traffic_policy: "Cluster"
# defines the DNS name string template for the master load balancer cluster
master_dns_name_format: '{cluster}.{team}.{hostedzone}'
# defines the DNS name string template for the replica load balancer cluster
@@ -307,8 +312,12 @@ podServiceAccount:
# If not set a name is generated using the fullname template and "-pod" suffix
name: "postgres-pod"

# priority class for operator pod
priorityClassName: ""

# priority class for database pods
podPriorityClassName: ""

resources:
limits:
cpu: 500m
@@ -2,6 +2,10 @@ version: "2017-09-20"
pipeline:
- id: build-postgres-operator
type: script
vm: large
cache:
paths:
- /go/pkg/mod
commands:
- desc: 'Update'
cmd: |
@@ -237,9 +237,11 @@ kubectl logs acid-minimal-cluster-0

## End-to-end tests

The operator provides reference end-to-end tests (e2e) (as Docker image) to
ensure various infrastructure parts work smoothly together. Each e2e execution
tests a Postgres Operator image built from the current git branch. The test
The operator provides reference end-to-end (e2e) tests to
ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`.
The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build.

Each e2e execution tests a Postgres Operator image built from the current git branch. The test
runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/),
utilizes provided manifest examples, and runs e2e tests contained in the `tests`
folder. The K8s API client in the container connects to the `kind` cluster via
@@ -65,6 +65,16 @@ These parameters are grouped directly under the `spec` key in the manifest.
custom Docker image that overrides the **docker_image** operator parameter.
It should be a [Spilo](https://github.com/zalando/spilo) image. Optional.

* **spiloRunAsUser**
sets the user ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.

* **spiloRunAsGroup**
sets the group ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.

* **spiloFSGroup**
the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
writable by the group ID specified. This will override the **spilo_fsgroup**
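For orientation, a minimal sketch of how these three keys might sit in a Postgres cluster manifest; this excerpt is an assumed example (the cluster name and the 101/103 UID/GID values are placeholders, not defaults):

```yaml
# Hypothetical excerpt from a postgresql manifest; values are illustrative only.
apiVersion: acid.zalan.do/v1
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  numberOfInstances: 2
  spiloRunAsUser: 101   # UID the Spilo process runs as (instead of root)
  spiloRunAsGroup: 103  # GID the Spilo process runs with
  spiloFSGroup: 103     # group that owns and can write the data volume
```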
@@ -317,6 +317,16 @@ configuration they are grouped under the `kubernetes` key.
that should be assigned to the Postgres pods. The priority class itself must
be defined in advance. Default is empty (use the default priority class).

* **spilo_runasuser**
sets the user ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.

* **spilo_runasgroup**
sets the group ID which should be used in the container to run the process.
This must be set to run the container without root. By default the container
runs with root. This option only works for Spilo versions >= 1.6-p3.

* **spilo_fsgroup**
the Persistent Volumes for the Spilo pods in the StatefulSet will be owned and
writable by the group ID specified. This is required to run Spilo as a
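A comparable sketch for the CRD-based operator configuration, assuming the settings live under the `kubernetes` section as documented above (UID/GID values are again placeholders):

```yaml
# Hypothetical excerpt from an OperatorConfiguration resource.
configuration:
  kubernetes:
    spilo_runasuser: 101
    spilo_runasgroup: 103
    spilo_fsgroup: 103
```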
@@ -424,6 +434,12 @@ CRD-based configuration.
Those options affect the behavior of load balancers created by the operator.
In the CRD-based configuration they are grouped under the `load_balancer` key.

* **custom_service_annotations**
This key/value map provides a list of annotations that get attached to each
service of a cluster created by the operator. If the annotation key is also
provided by the cluster definition, the manifest value is used.
Optional.

* **db_hosted_zone**
DNS zone for the cluster DNS name when the load balancer is configured for
the cluster. Only used when combined with
@@ -440,11 +456,8 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
cluster. Can be overridden by individual cluster settings. The default is
`false`.

* **custom_service_annotations**
This key/value map provides a list of annotations that get attached to each
service of a cluster created by the operator. If the annotation key is also
provided by the cluster definition, the manifest value is used.
Optional.
* **external_traffic_policy** defines external traffic policy for load
balancers. Allowed values are `Cluster` (default) and `Local`.

* **master_dns_name_format** defines the DNS name string template for the
master load balancer cluster. The default is
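As an illustration of how these load balancer options combine, a rough sketch of the `load_balancer` block in the CRD-based configuration (an assumed example; the DNS templates simply mirror the documented defaults):

```yaml
# Hypothetical excerpt from an OperatorConfiguration resource.
configuration:
  load_balancer:
    enable_master_load_balancer: false
    enable_replica_load_balancer: true
    external_traffic_policy: "Local"  # "Cluster" is the default
    master_dns_name_format: "{cluster}.{team}.{hostedzone}"
    replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
```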
@@ -1,11 +1,12 @@
# An image to perform the actual test. Do not forget to copy all necessary test
# files here.
FROM ubuntu:18.04
# An image to run e2e tests.
# The image does not include the tests; all necessary files are bind-mounted when a container starts.
FROM ubuntu:20.04
LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

COPY manifests ./manifests
COPY exec.sh ./exec.sh
COPY requirements.txt tests ./
ENV TERM xterm-256color

COPY requirements.txt ./
COPY scm-source.json ./

RUN apt-get update \
&& apt-get install --no-install-recommends -y \
@@ -14,13 +15,10 @@ RUN apt-get update \
python3-pip \
curl \
&& pip3 install --no-cache-dir -r requirements.txt \
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.14.0/bin/linux/amd64/kubectl \
&& curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.18.0/bin/linux/amd64/kubectl \
&& chmod +x ./kubectl \
&& mv ./kubectl /usr/local/bin/kubectl \
&& apt-get clean \
&& rm -rf /var/lib/apt/lists/*

ARG VERSION=dev
RUN sed -i "s/__version__ = .*/__version__ = '${VERSION}'/" ./__init__.py

CMD ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"]
ENTRYPOINT ["python3", "-m", "unittest", "discover", "--start-directory", ".", "-v"]
e2e/Makefile
@@ -1,6 +1,6 @@
.PHONY: clean copy docker push tools test

BINARY ?= postgres-operator-e2e-tests
BINARY ?= postgres-operator-e2e-tests-runner
BUILD_FLAGS ?= -v
CGO_ENABLED ?= 0
ifeq ($(RACE),1)
@@ -34,15 +34,20 @@ copy: clean
mkdir manifests
cp ../manifests -r .

docker: copy
docker build --build-arg "VERSION=$(VERSION)" -t "$(IMAGE):$(TAG)" .
docker: scm-source.json
docker build -t "$(IMAGE):$(TAG)" .

scm-source.json: ../.git
echo '{\n "url": "git:$(GITURL)",\n "revision": "$(GITHEAD)",\n "author": "$(USER)",\n "status": "$(GITSTATUS)"\n}' > scm-source.json

push: docker
docker push "$(IMAGE):$(TAG)"

tools: docker
tools:
# install pinned version of 'kind'
GO111MODULE=on go get sigs.k8s.io/kind@v0.5.1
# go get must run outside of a dir with a (module-based) Go project !
# otherwise go get updates project's dependencies and/or behaves differently
cd "/tmp" && GO111MODULE=on go get sigs.k8s.io/kind@v0.9.0

e2etest:
./run.sh
e2etest: tools copy clean
./run.sh main
@@ -1,5 +1,5 @@
kind: Cluster
apiVersion: kind.sigs.k8s.io/v1alpha3
apiVersion: kind.x-k8s.io/v1alpha4
nodes:
- role: control-plane
- role: worker
e2e/run.sh
@@ -6,57 +6,67 @@ set -o nounset
set -o pipefail
IFS=$'\n\t'

cd $(dirname "$0");

readonly cluster_name="postgres-operator-e2e-tests"
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-12:1.6-p5"

echo "Clustername: ${cluster_name}"
echo "Kubeconfig path: ${kubeconfig_path}"

function pull_images(){

operator_tag=$(git describe --tags --always --dirty)
if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]]
then
docker pull registry.opensource.zalan.do/acid/postgres-operator:latest
fi
if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:${operator_tag}) ]]
then
docker pull registry.opensource.zalan.do/acid/postgres-operator-e2e-tests:latest
fi

operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1)
e2e_test_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests" --format "{{.Repository}}:{{.Tag}}" | head -1)

# this image does not contain the tests; a container mounts them from a local "./tests" dir at start time
e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:latest"
docker pull ${e2e_test_runner_image}
}

function start_kind(){

echo "Starting kind for e2e tests"
# avoid interference with previous test runs
if [[ $(kind get clusters | grep "^${cluster_name}*") != "" ]]
then
kind delete cluster --name ${cluster_name}
fi

export KUBECONFIG="${kubeconfig_path}"
kind create cluster --name ${cluster_name} --config kind-cluster-postgres-operator-e2e-tests.yaml
kind load docker-image "${operator_image}" --name ${cluster_name}
kind load docker-image "${e2e_test_image}" --name ${cluster_name}
KUBECONFIG="$(kind get kubeconfig-path --name=${cluster_name})"
export KUBECONFIG
docker pull "${spilo_image}"
kind load docker-image "${spilo_image}" --name ${cluster_name}
}

function set_kind_api_server_ip(){
echo "Setting up kind API server ip"
# use the actual kubeconfig to connect to the 'kind' API server
# but update the IP address of the API server to the one from the Docker 'bridge' network
cp "${KUBECONFIG}" /tmp
readonly local kind_api_server_port=6443 # well-known in the 'kind' codebase
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
readonly local kind_api_server=$(docker inspect --format "{{ .NetworkSettings.Networks.kind.IPAddress }}:${kind_api_server_port}" "${cluster_name}"-control-plane)
sed -i "s/server.*$/server: https:\/\/$kind_api_server/g" "${kubeconfig_path}"
}

function run_tests(){
echo "Running tests..."

# tests modify files in ./manifests, so we mount a copy of this directory done by the e2e Makefile

docker run --rm --network=host -e "TERM=xterm-256color" \
--mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \
--mount type=bind,source="$(readlink -f manifests)",target=/manifests \
--mount type=bind,source="$(readlink -f tests)",target=/tests \
--mount type=bind,source="$(readlink -f exec.sh)",target=/exec.sh \
-e OPERATOR_IMAGE="${operator_image}" "${e2e_test_runner_image}"

docker run --rm --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config -e OPERATOR_IMAGE="${operator_image}" "${e2e_test_image}"
}

function clean_up(){
echo "Executing cleanup"
unset KUBECONFIG
kind delete cluster --name ${cluster_name}
rm -rf ${kubeconfig_path}
@@ -66,11 +76,11 @@ function main(){

trap "clean_up" QUIT TERM EXIT

pull_images
start_kind
set_kind_api_server_ip
time pull_images
time start_kind
time set_kind_api_server_ip
run_tests
exit 0
}

main "$@"
"$@"
@@ -34,10 +34,14 @@ class EndToEndTestCase(unittest.TestCase):
In the case of test failure the cluster will stay to enable manual examination;
next invocation of "make test" will re-create it.
'''
print("Test Setup being executed")

# set a single K8s wrapper for all tests
k8s = cls.k8s = K8s()

# remove existing local storage class and create hostpath class
k8s.api.storage_v1_api.delete_storage_class("standard")

# operator deploys pod service account there on start up
# needed for test_multi_namespace_support()
cls.namespace = "test"
@@ -54,7 +58,8 @@ class EndToEndTestCase(unittest.TestCase):
"configmap.yaml",
"postgres-operator.yaml",
"infrastructure-roles.yaml",
"infrastructure-roles-new.yaml"]:
"infrastructure-roles-new.yaml",
"e2e-storage-class.yaml"]:
result = k8s.create_with_kubectl("manifests/" + filename)
print("stdout: {}, stderr: {}".format(result.stdout, result.stderr))
@@ -159,6 +164,7 @@ class EndToEndTestCase(unittest.TestCase):
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'

try:
# enable load balancer services
pg_patch_enable_lbs = {
"spec": {
@@ -199,6 +205,57 @@ class EndToEndTestCase(unittest.TestCase):
self.assertEqual(repl_svc_type, 'ClusterIP',
"Expected ClusterIP service type for replica, found {}".format(repl_svc_type))

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_infrastructure_roles(self):
'''
Test using external secrets for infrastructure roles
'''
k8s = self.k8s
# update infrastructure roles description
secret_name = "postgresql-infrastructure-roles"
roles = "secretname: postgresql-infrastructure-roles-new, \
userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon"
patch_infrastructure_roles = {
"data": {
"infrastructure_roles_secret_name": secret_name,
"infrastructure_roles_secrets": roles,
},
}
k8s.update_config(patch_infrastructure_roles)

# wait a little before proceeding
time.sleep(30)

try:
# check that new roles are represented in the config by requesting the
# operator configuration via API
operator_pod = k8s.get_operator_pod()
get_config_cmd = "wget --quiet -O - localhost:8080/config"
result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
roles_dict = (json.loads(result.stdout)
.get("controller", {})
.get("InfrastructureRoles"))

self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
role = roles_dict["robot_zmon_acid_monitoring_new"]
role.pop("Password", None)
self.assertDictEqual(role, {
"Name": "robot_zmon_acid_monitoring_new",
"Flags": None,
"MemberOf": ["robot_zmon"],
"Parameters": None,
"AdminRole": "",
"Origin": 2,
})

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_lazy_spilo_upgrade(self):
'''
@@ -226,6 +283,7 @@ class EndToEndTestCase(unittest.TestCase):
pod0 = 'acid-minimal-cluster-0'
pod1 = 'acid-minimal-cluster-1'

try:
# restart the pod to get a container with the new image
k8s.api.core_v1.delete_namespaced_pod(pod0, 'default')
time.sleep(60)
@@ -233,7 +291,8 @@ class EndToEndTestCase(unittest.TestCase):
# lazy update works if the restarted pod and older pods run different Spilo versions
new_image = k8s.get_effective_pod_image(pod0)
old_image = k8s.get_effective_pod_image(pod1)
self.assertNotEqual(new_image, old_image, "Lazy updated failed: pods have the same image {}".format(new_image))
self.assertNotEqual(new_image, old_image,
"Lazy updated failed: pods have the same image {}".format(new_image))

# sanity check
assert_msg = "Image {} of a new pod differs from {} in operator conf".format(new_image, conf_image)
@@ -256,9 +315,14 @@ class EndToEndTestCase(unittest.TestCase):
image0 = k8s.get_effective_pod_image(pod0)
image1 = k8s.get_effective_pod_image(pod1)

assert_msg = "Disabling lazy upgrade failed: pods still have different images {} and {}".format(image0, image1)
assert_msg = "Disabling lazy upgrade failed: pods still have different \
images {} and {}".format(image0, image1)
self.assertEqual(image0, image1, assert_msg)

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_logical_backup_cron_job(self):
'''
@@ -283,6 +347,8 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_enable_backup)

try:
k8s.wait_for_logical_backup_job_creation()

jobs = k8s.get_logical_backup_job().items
@@ -323,6 +389,10 @@ class EndToEndTestCase(unittest.TestCase):
self.assertEqual(0, len(jobs),
"Expected 0 logical backup jobs, found {}".format(len(jobs)))

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_min_resource_limits(self):
'''
@@ -361,6 +431,8 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_resources)

try:
k8s.wait_for_pod_failover(failover_targets, labels)
k8s.wait_for_pod_start('spilo-role=replica')

@@ -376,6 +448,10 @@ class EndToEndTestCase(unittest.TestCase):
"Expected memory limit {}, found {}"
.format(minMemoryLimit, masterPod.spec.containers[0].resources.limits['memory']))

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_multi_namespace_support(self):
'''
@@ -388,10 +464,15 @@ class EndToEndTestCase(unittest.TestCase):
pg_manifest["metadata"]["namespace"] = self.namespace
yaml.dump(pg_manifest, f, Dumper=yaml.Dumper)

try:
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.namespace)
self.assert_master_is_unique(self.namespace, "acid-test-cluster")

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_node_readiness_label(self):
'''
@@ -402,6 +483,7 @@ class EndToEndTestCase(unittest.TestCase):
readiness_label = 'lifecycle-status'
readiness_value = 'ready'

try:
# get nodes of master and replica(s) (expected target of new master)
current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
num_replicas = len(current_replica_nodes)
@@ -437,6 +519,10 @@ class EndToEndTestCase(unittest.TestCase):
# toggle pod anti affinity to move replica away from master node
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
'''
@@ -445,6 +531,7 @@ class EndToEndTestCase(unittest.TestCase):
k8s = self.k8s
labels = "application=spilo,cluster-name=acid-minimal-cluster"

try:
k8s.wait_for_pg_to_scale(3)
self.assertEqual(3, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()
@@ -453,6 +540,10 @@ class EndToEndTestCase(unittest.TestCase):
self.assertEqual(2, k8s.count_pods_with_label(labels))
self.assert_master_is_unique()

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_service_annotations(self):
'''
@@ -466,6 +557,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(patch_custom_service_annotations)

try:
pg_patch_custom_annotations = {
"spec": {
"serviceAnnotations": {
@@ -488,6 +580,10 @@ class EndToEndTestCase(unittest.TestCase):
self.assertTrue(k8s.check_service_annotations(
"cluster-name=acid-minimal-cluster,spilo-role=replica", annotations))

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

# clean up
unpatch_custom_service_annotations = {
"data": {
@@ -511,6 +607,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(patch_sset_propagate_annotations)

try:
pg_crd_annotations = {
"metadata": {
"annotations": {
@@ -530,6 +627,10 @@ class EndToEndTestCase(unittest.TestCase):
}
self.assertTrue(k8s.check_statefulset_annotations(cluster_label, annotations))

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_taint_based_eviction(self):
'''
@@ -555,6 +656,7 @@ class EndToEndTestCase(unittest.TestCase):
}
}

try:
# patch node and test if master is failing over to one of the expected nodes
k8s.api.core_v1.patch_node(current_master_node, body)
new_master_node, new_replica_nodes = self.assert_failover(
@@ -574,46 +676,9 @@ class EndToEndTestCase(unittest.TestCase):
# toggle pod anti affinity to move replica away from master node
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_infrastructure_roles(self):
'''
Test using external secrets for infrastructure roles
'''
k8s = self.k8s
# update infrastructure roles description
secret_name = "postgresql-infrastructure-roles"
roles = "secretname: postgresql-infrastructure-roles-new, userkey: user, rolekey: memberof, passwordkey: password, defaultrolevalue: robot_zmon"
patch_infrastructure_roles = {
"data": {
"infrastructure_roles_secret_name": secret_name,
"infrastructure_roles_secrets": roles,
},
}
k8s.update_config(patch_infrastructure_roles)

# wait a little before proceeding
time.sleep(30)

# check that new roles are represented in the config by requesting the
# operator configuration via API
operator_pod = k8s.get_operator_pod()
get_config_cmd = "wget --quiet -O - localhost:8080/config"
result = k8s.exec_with_kubectl(operator_pod.metadata.name, get_config_cmd)
roles_dict = (json.loads(result.stdout)
.get("controller", {})
.get("InfrastructureRoles"))

self.assertTrue("robot_zmon_acid_monitoring_new" in roles_dict)
role = roles_dict["robot_zmon_acid_monitoring_new"]
role.pop("Password", None)
self.assertDictEqual(role, {
"Name": "robot_zmon_acid_monitoring_new",
"Flags": None,
"MemberOf": ["robot_zmon"],
"Parameters": None,
"AdminRole": "",
"Origin": 2,
})
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_x_cluster_deletion(self):
@@ -632,6 +697,7 @@ class EndToEndTestCase(unittest.TestCase):
}
k8s.update_config(patch_delete_annotations)

try:
# this delete attempt should be omitted because of missing annotations
k8s.api.custom_objects_api.delete_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster")
@@ -647,11 +713,11 @@ class EndToEndTestCase(unittest.TestCase):
time.sleep(10)

# add annotations to manifest
deleteDate = datetime.today().strftime('%Y-%m-%d')
delete_date = datetime.today().strftime('%Y-%m-%d')
pg_patch_delete_annotations = {
"metadata": {
"annotations": {
"delete-date": deleteDate,
"delete-date": delete_date,
"delete-clustername": "acid-minimal-cluster",
}
}
@@ -680,17 +746,22 @@ class EndToEndTestCase(unittest.TestCase):
self.assertEqual(0, k8s.count_pdbs_with_label(cluster_label))
self.assertEqual(0, k8s.count_secrets_with_label(cluster_label))

except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise

def get_failover_targets(self, master_node, replica_nodes):
'''
If all pods live on the same node, failover will happen to other worker(s)
'''
k8s = self.k8s
k8s_master_exclusion = 'kubernetes.io/hostname!=postgres-operator-e2e-tests-control-plane'

failover_targets = [x for x in replica_nodes if x != master_node]
if len(failover_targets) == 0:
nodes = k8s.api.core_v1.list_node()
nodes = k8s.api.core_v1.list_node(label_selector=k8s_master_exclusion)
for n in nodes.items:
if "node-role.kubernetes.io/master" not in n.metadata.labels and n.metadata.name != master_node:
if n.metadata.name != master_node:
failover_targets.append(n.metadata.name)

return failover_targets
@@ -738,8 +809,7 @@ class EndToEndTestCase(unittest.TestCase):
}
}
k8s.update_config(patch_enable_antiaffinity)
self.assert_failover(
master_node, len(replica_nodes), failover_targets, cluster_label)
self.assert_failover(master_node, len(replica_nodes), failover_targets, cluster_label)

# now disable pod anti affintiy again which will cause yet another failover
patch_disable_antiaffinity = {
@@ -767,6 +837,7 @@ class K8sApi:
self.batch_v1_beta1 = client.BatchV1beta1Api()
self.custom_objects_api = client.CustomObjectsApi()
self.policy_v1_beta1 = client.PolicyV1beta1Api()
self.storage_v1_api = client.StorageV1Api()


class K8s:
go.mod
@@ -10,12 +10,11 @@ require (
github.com/sirupsen/logrus v1.6.0
github.com/stretchr/testify v1.5.1
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9
golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06 // indirect
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab // indirect
gopkg.in/yaml.v2 v2.2.8
k8s.io/api v0.18.8
k8s.io/apiextensions-apiserver v0.18.0
k8s.io/apimachinery v0.18.8
k8s.io/client-go v0.18.6
k8s.io/client-go v0.18.8
k8s.io/code-generator v0.18.8
)
go.sum
@@ -137,7 +137,6 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -146,7 +145,6 @@ github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -156,7 +154,6 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+
github.com/google/uuid v1.1.1 h1:Gkbcsh/GbpXz7lPftLA3P6TYMwjCLYm83jiFQZF/3gY=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d h1:7XGaL1e6bYS1yIonGp9761ExpPPV1ui0SAC59Yube9k=
github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
github.com/googleapis/gnostic v0.1.0 h1:rVsPeBmXbYv4If/cumu1AzZPwV58q433hvONV1UEZoI=
github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY=
@@ -181,7 +178,6 @@ github.com/jmespath/go-jmespath v0.3.0 h1:OS12ieG61fsCg5+qLJ+SsW9NicxNkg3b25OyT2
github.com/jmespath/go-jmespath v0.3.0/go.mod h1:9QtRXoHjLGCJ5IBSaohpXITPlowMeeYCZ7fLUTSywik=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.7 h1:KfgG9LzI+pYjr4xvmz/5H4FXjokeP+rlHLhv3iH62Fo=
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.8 h1:QiWkFLKq0T7mpzwOTu6BzNDbfTE8OLrYhVKYMLF46Ok=
github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
@@ -272,7 +268,6 @@ github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tL
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
@@ -281,7 +276,6 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1 h1:nOGnQDM7FYENwehXlg/kFVnos3rEvtKTjRvOWSzb6H4=
@@ -293,7 +287,7 @@ github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijb
github.com/vektah/gqlparser v1.1.2/go.mod h1:1ycwN7Ij5njmMkPPAOaRFY4rET2Enx7IkVv3vaXspKw=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg=
go.mongodb.org/mongo-driver v1.0.3/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM=
@@ -339,8 +333,8 @@ golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200625001655-4c5254603344 h1:vGXIOMxbNfDTk/aXCmfdLgkrSV+Z2tcbze+pEc3v5W4=
golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45 h1:SVwTIAaPC2U/AvvLNZ2a7OVsmBpC8L5BlwK1whH3hm0=
@@ -375,7 +369,6 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c h1:fqgJT0MGcGpPgpWU7VRdRjuArfcOvC4AoJmILihzhDg=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4 h1:SvFZT6jyqRaOeXpc5h/JSfZenJ2O330aBsf7JfSUXmQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -393,12 +386,10 @@ golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgw
golang.org/x/tools v0.0.0-20190617190820-da514acc4774/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc=
golang.org/x/tools v0.0.0-20190920225731-5eefd052ad72/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06 h1:ChBCbOHeLqK+j+znGPlWCcvx/t2PdxmyPBheVZxXbcc=
golang.org/x/tools v0.0.0-20200826040757-bc8aaaa29e06/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab h1:CyH2SDm5ATQiX9gtbMYfvNNed97A9v+TJFnUX/fTaJY=
golang.org/x/tools v0.0.0-20200928201943-a0ef9b62deab/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
@@ -431,7 +422,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@@ -441,20 +431,17 @@ honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc h1:/hemPrYIhOhy8zYrNj+069zDB68us2sMGsfkFJO0iZs=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
k8s.io/api v0.18.0/go.mod h1:q2HRQkfDzHMBZL9l/y9rH63PkQl4vae0xRT+8prbrK8=
k8s.io/api v0.18.6/go.mod h1:eeyxr+cwCjMdLAmr2W3RyDI0VvTawSg/3RFFBEnmZGI=
k8s.io/api v0.18.8 h1:aIKUzJPb96f3fKec2lxtY7acZC9gQNDLVhfSGpxBAC4=
k8s.io/api v0.18.8/go.mod h1:d/CXqwWv+Z2XEG1LgceeDmHQwpUJhROPx16SlxJgERY=
k8s.io/apiextensions-apiserver v0.18.0 h1:HN4/P8vpGZFvB5SOMuPPH2Wt9Y/ryX+KRvIyAkchu1Q=
k8s.io/apiextensions-apiserver v0.18.0/go.mod h1:18Cwn1Xws4xnWQNC00FLq1E350b9lUF+aOdIWDOZxgo=
k8s.io/apimachinery v0.18.0/go.mod h1:9SnR/e11v5IbyPCGbvJViimtJ0SwHG4nfZFjU77ftcA=
k8s.io/apimachinery v0.18.6/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko=
k8s.io/apimachinery v0.18.8 h1:jimPrycCqgx2QPearX3to1JePz7wSbVLq+7PdBTTwQ0=
k8s.io/apimachinery v0.18.8/go.mod h1:6sQd+iHEqmOtALqOFjSWp2KZ9F0wlU/nWm0ZgsYWMig=
k8s.io/apiserver v0.18.0/go.mod h1:3S2O6FeBBd6XTo0njUrLxiqk8GNy6wWOftjhJcXYnjw=
k8s.io/client-go v0.18.0 h1:yqKw4cTUQraZK3fcVCMeSa+lqKwcjZ5wtcOIPnxQno4=
k8s.io/client-go v0.18.0/go.mod h1:uQSYDYs4WhVZ9i6AIoEZuwUggLVEF64HOD37boKAtF8=
k8s.io/client-go v0.18.6 h1:I+oWqJbibLSGsZj8Xs8F0aWVXJVIoUHWaaJV3kUN/Zw=
k8s.io/client-go v0.18.6/go.mod h1:/fwtGLjYMS1MaM5oi+eXhKwG+1UHidUEXRh6cNsdO0Q=
k8s.io/client-go v0.18.8 h1:SdbLpIxk5j5YbFr1b7fq8S7mDgDjYmUxSbszyoesoDM=
k8s.io/client-go v0.18.8/go.mod h1:HqFqMllQ5NnQJNwjro9k5zMyfhZlOwpuTLVrxjkYSxU=
k8s.io/code-generator v0.18.0/go.mod h1:+UHX5rSbxmR8kzS+FAv7um6dtYrZokQvjHpDSYRVkTc=
k8s.io/code-generator v0.18.8 h1:lgO1P1wjikEtzNvj7ia+x1VC4svJ28a/r0wnOLhhOTU=
k8s.io/code-generator v0.18.8/go.mod h1:TgNEVx9hCyPGpdtCWA34olQYLkh3ok9ar7XfSsr8b6c=
@@ -475,7 +462,6 @@ sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.7/go.mod h1:PHgbrJT
sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0 h1:dOmIZBMfhcHS09XZkMyUgkq5trg3/jRyJYFZUiaOp8E=
sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw=
sigs.k8s.io/yaml v1.1.0 h1:4A07+ZFc2wgJwo8YNlQpr1rVlgUDlxXHhPJciaPY5gs=
sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
@@ -69,6 +69,8 @@ spec:
# name: my-config-map

enableShmVolume: true
# spiloRunAsUser: 101
# spiloRunAsGroup: 103
# spiloFSGroup: 103
# podAnnotations:
# annotation.key: value
@@ -15,7 +15,7 @@ data:
# connection_pooler_default_cpu_request: "500m"
# connection_pooler_default_memory_limit: 100Mi
# connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-9"
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-11"
# connection_pooler_max_db_connections: 60
# connection_pooler_mode: "transaction"
# connection_pooler_number_of_instances: 2
@@ -31,7 +31,7 @@ data:
# default_memory_request: 100Mi
# delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p3
docker_image: registry.opensource.zalan.do/acid/spilo-12:1.6-p5
# downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true"
# enable_crd_validation: "true"
@@ -47,6 +47,7 @@ data:
# enable_team_superuser: "false"
enable_teams_api: "false"
# etcd_host: ""
external_traffic_policy: "Cluster"
# gcp_credentials: ""
# kubernetes_use_configmaps: "false"
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
@@ -80,6 +81,7 @@ data:
# pod_environment_secret: "my-custom-secret"
pod_label_wait_timeout: 10m
pod_management_policy: "ordered_ready"
# pod_priority_class_name: "postgres-pod-priority"
pod_role_label: spilo-role
# pod_service_account_definition: ""
pod_service_account_name: "postgres-pod"
@@ -99,6 +101,8 @@ data:
secret_name_template: "{username}.{cluster}.credentials"
# sidecar_docker_images: ""
# set_memory_request_to_limit: "false"
# spilo_runasuser: 101
# spilo_runasgroup: 103
# spilo_fsgroup: 103
spilo_privileged: "false"
# storage_resize_mode: "off"
@@ -0,0 +1,8 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
namespace: kube-system
name: standard
annotations:
storageclass.kubernetes.io/is-default-class: "true"
provisioner: kubernetes.io/host-path
@@ -196,6 +196,10 @@ spec:
type: string
secret_name_template:
type: string
spilo_runasuser:
type: integer
spilo_runasgroup:
type: integer
spilo_fsgroup:
type: integer
spilo_privileged:
@@ -261,6 +265,11 @@ spec:
type: boolean
enable_replica_load_balancer:
type: boolean
external_traffic_policy:
type: string
enum:
- "Cluster"
- "Local"
master_dns_name_format:
type: string
replica_dns_name_format:
@@ -0,0 +1,11 @@
apiVersion: scheduling.k8s.io/v1
description: 'This priority class must be used only for databases controlled by the
Postgres operator'
kind: PriorityClass
metadata:
labels:
application: postgres-operator
name: postgres-pod-priority
preemptionPolicy: PreemptLowerPriority
globalDefault: false
value: 1000000
@@ -61,13 +61,15 @@ configuration:
# pod_environment_configmap: "default/my-custom-config"
# pod_environment_secret: "my-custom-secret"
pod_management_policy: "ordered_ready"
# pod_priority_class_name: ""
# pod_priority_class_name: "postgres-pod-priority"
pod_role_label: spilo-role
# pod_service_account_definition: ""
pod_service_account_name: postgres-pod
# pod_service_account_role_binding_definition: ""
pod_terminate_grace_period: 5m
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# spilo_runasuser: 101
# spilo_runasgroup: 103
# spilo_fsgroup: 103
spilo_privileged: false
storage_resize_mode: ebs
@@ -88,12 +90,13 @@ configuration:
resource_check_interval: 3s
resource_check_timeout: 10m
load_balancer:
# db_hosted_zone: ""
enable_master_load_balancer: false
enable_replica_load_balancer: false
# custom_service_annotations:
# keyx: valuex
# keyy: valuey
# db_hosted_zone: ""
enable_master_load_balancer: false
enable_replica_load_balancer: false
external_traffic_policy: "Cluster"
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
aws_or_gcp:
@@ -372,6 +372,10 @@ spec:
items:
type: object
additionalProperties: true
spiloRunAsUser:
type: integer
spiloRunAsGroup:
type: integer
spiloFSGroup:
type: integer
standby:
@@ -522,6 +522,12 @@ var PostgresCRDResourceValidation = apiextv1beta1.CustomResourceValidation{
},
},
},
"spiloRunAsUser": {
Type: "integer",
},
"spiloRunAsGroup": {
Type: "integer",
},
"spiloFSGroup": {
Type: "integer",
},
@@ -1021,6 +1027,12 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
"secret_name_template": {
Type: "string",
},
"spilo_runasuser": {
Type: "integer",
},
"spilo_runasgroup": {
Type: "integer",
},
"spilo_fsgroup": {
Type: "integer",
},
@@ -1126,6 +1138,17 @@ var OperatorConfigCRDResourceValidation = apiextv1beta1.CustomResourceValidation
"enable_replica_load_balancer": {
Type: "boolean",
},
"external_traffic_policy": {
Type: "string",
Enum: []apiextv1beta1.JSON{
{
Raw: []byte(`"Cluster"`),
},
{
Raw: []byte(`"Local"`),
},
},
},
"master_dns_name_format": {
Type: "string",
},
@ -49,6 +49,8 @@ type KubernetesMetaConfiguration struct {
|
|||
PodServiceAccountRoleBindingDefinition string `json:"pod_service_account_role_binding_definition,omitempty"`
|
||||
PodTerminateGracePeriod Duration `json:"pod_terminate_grace_period,omitempty"`
|
||||
SpiloPrivileged bool `json:"spilo_privileged,omitempty"`
|
||||
SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
|
||||
SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
|
||||
SpiloFSGroup *int64 `json:"spilo_fsgroup,omitempty"`
|
||||
WatchedNamespace string `json:"watched_namespace,omitempty"`
|
||||
PDBNameFormat config.StringTemplate `json:"pdb_name_format,omitempty"`
|
||||
|
|
@ -109,6 +111,7 @@ type LoadBalancerConfiguration struct {
CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"`
MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"`
ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"`
}

// AWSGCPConfiguration defines the configuration for AWS
@ -202,8 +205,7 @@ type OperatorConfigurationData struct {
RepairPeriod Duration `json:"repair_period,omitempty"`
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`
ShmVolume *bool `json:"enable_shm_volume,omitempty"`
// deprecated in favour of SidecarContainers
SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"`
SidecarImages map[string]string `json:"sidecar_docker_images,omitempty"` // deprecated in favour of SidecarContainers
SidecarContainers []v1.Container `json:"sidecars,omitempty"`
PostgresUsersConfiguration PostgresUsersConfiguration `json:"users"`
Kubernetes KubernetesMetaConfiguration `json:"kubernetes"`
@ -36,6 +36,8 @@ type PostgresSpec struct {
TeamID string `json:"teamId"`
DockerImage string `json:"dockerImage,omitempty"`

SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"`
SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`

// vars that enable load balancers are pointers because it is important to know if any of them is omitted from the Postgres manifest
@ -54,7 +56,7 @@ type PostgresSpec struct {
NumberOfInstances int32 `json:"numberOfInstances"`
Users map[string]UserFlags `json:"users"`
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
Clone *CloneDescription `json:"clone"`
Clone *CloneDescription `json:"clone,omitempty"`
ClusterName string `json:"-"`
Databases map[string]string `json:"databases,omitempty"`
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
@ -65,10 +67,10 @@ type PostgresSpec struct {
ShmVolume *bool `json:"enableShmVolume,omitempty"`
EnableLogicalBackup bool `json:"enableLogicalBackup,omitempty"`
LogicalBackupSchedule string `json:"logicalBackupSchedule,omitempty"`
StandbyCluster *StandbyDescription `json:"standby"`
PodAnnotations map[string]string `json:"podAnnotations"`
ServiceAnnotations map[string]string `json:"serviceAnnotations"`
TLS *TLSDescription `json:"tls"`
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
TLS *TLSDescription `json:"tls,omitempty"`
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`

// deprecated json tags
@ -147,6 +147,16 @@ func (in *ConnectionPoolerConfiguration) DeepCopy() *ConnectionPoolerConfigurati
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfiguration) {
*out = *in
if in.SpiloRunAsUser != nil {
in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
*out = new(int64)
**out = **in
}
if in.SpiloRunAsGroup != nil {
in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
*out = new(int64)
**out = **in
}
if in.SpiloFSGroup != nil {
in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
*out = new(int64)
@ -527,6 +537,16 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
*out = new(ConnectionPooler)
(*in).DeepCopyInto(*out)
}
if in.SpiloRunAsUser != nil {
in, out := &in.SpiloRunAsUser, &out.SpiloRunAsUser
*out = new(int64)
**out = **in
}
if in.SpiloRunAsGroup != nil {
in, out := &in.SpiloRunAsGroup, &out.SpiloRunAsGroup
*out = new(int64)
**out = **in
}
if in.SpiloFSGroup != nil {
in, out := &in.SpiloFSGroup, &out.SpiloFSGroup
*out = new(int64)
@ -460,6 +460,15 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
}
}

// we assume any change in priority happens by rolling out a new priority class
// changing the priority value in an existing class is not supported
if c.Statefulset.Spec.Template.Spec.PriorityClassName != statefulSet.Spec.Template.Spec.PriorityClassName {
match = false
needsReplace = true
needsRollUpdate = true
reasons = append(reasons, "new statefulset's pod priority class in spec doesn't match the current one")
}

// lazy Spilo update: modify the image in the statefulset itself but let its pods run with the old image
// until they are re-created for other reasons, for example node rotation
if c.OpConfig.EnableLazySpiloUpgrade && !reflect.DeepEqual(c.Statefulset.Spec.Template.Spec.Containers[0].Image, statefulSet.Spec.Template.Spec.Containers[0].Image) {
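Since the comparison only reacts to a changed class name, raising pod priority in practice means rolling out a new PriorityClass and pointing pod_priority_class_name at it; the name and value below are made up for illustration:

apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: postgres-pod-priority-v2
value: 1000500
globalDefault: false
description: "priority class for Postgres database pods"

Referencing the new class in the operator configuration then marks the statefulset for replacement and triggers a rolling update, as implemented above.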
@ -561,6 +561,8 @@ func (c *Cluster) generatePodTemplate(
initContainers []v1.Container,
sidecarContainers []v1.Container,
tolerationsSpec *[]v1.Toleration,
spiloRunAsUser *int64,
spiloRunAsGroup *int64,
spiloFSGroup *int64,
nodeAffinity *v1.Affinity,
terminateGracePeriod int64,
@ -580,6 +582,14 @@ func (c *Cluster) generatePodTemplate(
containers = append(containers, sidecarContainers...)
securityContext := v1.PodSecurityContext{}

if spiloRunAsUser != nil {
securityContext.RunAsUser = spiloRunAsUser
}

if spiloRunAsGroup != nil {
securityContext.RunAsGroup = spiloRunAsGroup
}

if spiloFSGroup != nil {
securityContext.FSGroup = spiloFSGroup
}
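With all three values set, the resulting pod template would carry a standard Kubernetes security context roughly like this (IDs assumed from the commented defaults earlier in this change):

spec:
  securityContext:
    runAsUser: 101
    runAsGroup: 103
    fsGroup: 103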
@ -1077,7 +1087,17 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
// pickup the docker image for the spilo container
effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)

// determine the FSGroup for the spilo pod
// determine the User, Group and FSGroup for the spilo pod
effectiveRunAsUser := c.OpConfig.Resources.SpiloRunAsUser
if spec.SpiloRunAsUser != nil {
effectiveRunAsUser = spec.SpiloRunAsUser
}

effectiveRunAsGroup := c.OpConfig.Resources.SpiloRunAsGroup
if spec.SpiloRunAsGroup != nil {
effectiveRunAsGroup = spec.SpiloRunAsGroup
}

effectiveFSGroup := c.OpConfig.Resources.SpiloFSGroup
if spec.SpiloFSGroup != nil {
effectiveFSGroup = spec.SpiloFSGroup
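The operator-level values thus act as cluster-wide defaults, and a value in the postgresql manifest wins when both are present; a sketch with illustrative numbers:

# operator ConfigMap: defaults for all clusters
data:
  spilo_runasuser: "101"
  spilo_fsgroup: "103"

# postgresql manifest: per-cluster override, takes precedence
spec:
  spiloRunAsUser: 105
  spiloFSGroup: 106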
@ -1221,6 +1241,8 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
initContainers,
sidecarContainers,
&tolerationSpec,
effectiveRunAsUser,
effectiveRunAsGroup,
effectiveFSGroup,
nodeAffinity(c.OpConfig.NodeReadinessLabel),
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
@ -1623,6 +1645,7 @@ func (c *Cluster) generateService(role PostgresRole, spec *acidv1.PostgresSpec)
}

c.logger.Debugf("final load balancer source ranges as seen in a service spec (not necessarily applied): %q", serviceSpec.LoadBalancerSourceRanges)
serviceSpec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyType(c.OpConfig.ExternalTrafficPolicy)
serviceSpec.Type = v1.ServiceTypeLoadBalancer
} else if role == Replica {
// before PR #258, the replica service was only created if allocated a LB
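For a master role with a load balancer enabled, the generated Service would then roughly contain (only the fields relevant to this change; name and policy are illustrative):

apiVersion: v1
kind: Service
metadata:
  name: acid-minimal-cluster
spec:
  type: LoadBalancer
  externalTrafficPolicy: Cluster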
@ -1900,6 +1923,8 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
[]v1.Container{},
&[]v1.Toleration{},
nil,
nil,
nil,
nodeAffinity(c.OpConfig.NodeReadinessLabel),
int64(c.OpConfig.PodTerminateGracePeriod.Seconds()),
c.OpConfig.PodServiceAccountName,
@ -1313,6 +1313,8 @@ func TestTLS(t *testing.T) {
var err error
var spec acidv1.PostgresSpec
var cluster *Cluster
var spiloRunAsUser = int64(101)
var spiloRunAsGroup = int64(103)
var spiloFSGroup = int64(103)
var additionalVolumes = spec.AdditionalVolumes
@ -1340,6 +1342,8 @@ func TestTLS(t *testing.T) {
ReplicationUsername: replicationUserName,
},
Resources: config.Resources{
SpiloRunAsUser: &spiloRunAsUser,
SpiloRunAsGroup: &spiloRunAsGroup,
SpiloFSGroup: &spiloFSGroup,
},
},
@ -1753,3 +1757,83 @@ func TestSidecars(t *testing.T) {
})

}

func TestGenerateService(t *testing.T) {
var spec acidv1.PostgresSpec
var cluster *Cluster
var enableLB bool = true
spec = acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1,
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
Volume: acidv1.Volume{
Size: "1G",
},
Sidecars: []acidv1.Sidecar{
acidv1.Sidecar{
Name: "cluster-specific-sidecar",
},
acidv1.Sidecar{
Name: "cluster-specific-sidecar-with-resources",
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "210m", Memory: "0.8Gi"},
ResourceLimits: acidv1.ResourceDescription{CPU: "510m", Memory: "1.4Gi"},
},
},
acidv1.Sidecar{
Name: "replace-sidecar",
DockerImage: "overwrite-image",
},
},
EnableMasterLoadBalancer: &enableLB,
}

cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
Resources: config.Resources{
DefaultCPURequest: "200m",
DefaultCPULimit: "500m",
DefaultMemoryRequest: "0.7Gi",
DefaultMemoryLimit: "1.3Gi",
},
SidecarImages: map[string]string{
"deprecated-global-sidecar": "image:123",
},
SidecarContainers: []v1.Container{
v1.Container{
Name: "global-sidecar",
},
// will be replaced by a cluster specific sidecar with the same name
v1.Container{
Name: "replace-sidecar",
Image: "replaced-image",
},
},
Scalyr: config.Scalyr{
ScalyrAPIKey: "abc",
ScalyrImage: "scalyr-image",
ScalyrCPURequest: "220m",
ScalyrCPULimit: "520m",
ScalyrMemoryRequest: "0.9Gi",
// use default memory limit
},
ExternalTrafficPolicy: "Cluster",
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)

service := cluster.generateService(Master, &spec)
assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeCluster, service.Spec.ExternalTrafficPolicy)
cluster.OpConfig.ExternalTrafficPolicy = "Local"
service = cluster.generateService(Master, &spec)
assert.Equal(t, v1.ServiceExternalTrafficPolicyTypeLocal, service.Spec.ExternalTrafficPolicy)

}
@ -696,12 +696,8 @@ func (c *Cluster) syncPreparedDatabases() error {
if err := c.initDbConnWithName(preparedDbName); err != nil {
return fmt.Errorf("could not init connection to database %s: %v", preparedDbName, err)
}
defer func() {
if err := c.closeDbConn(); err != nil {
c.logger.Errorf("could not close database connection: %v", err)
}
}()

c.logger.Debugf("syncing prepared database %q", preparedDbName)
// now, prepare defined schemas
preparedSchemas := preparedDB.PreparedSchemas
if len(preparedDB.PreparedSchemas) == 0 {

@ -715,6 +711,10 @@ func (c *Cluster) syncPreparedDatabases() error {
if err := c.syncExtensions(preparedDB.Extensions); err != nil {
return err
}

if err := c.closeDbConn(); err != nil {
c.logger.Errorf("could not close database connection: %v", err)
}
}

return nil
@ -61,6 +61,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.PodEnvironmentSecret = fromCRD.Kubernetes.PodEnvironmentSecret
result.PodTerminateGracePeriod = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.PodTerminateGracePeriod), "5m")
result.SpiloPrivileged = fromCRD.Kubernetes.SpiloPrivileged
result.SpiloRunAsUser = fromCRD.Kubernetes.SpiloRunAsUser
result.SpiloRunAsGroup = fromCRD.Kubernetes.SpiloRunAsGroup
result.SpiloFSGroup = fromCRD.Kubernetes.SpiloFSGroup
result.ClusterDomain = util.Coalesce(fromCRD.Kubernetes.ClusterDomain, "cluster.local")
result.WatchedNamespace = fromCRD.Kubernetes.WatchedNamespace

@ -124,6 +126,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations
result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat
result.ExternalTrafficPolicy = util.Coalesce(fromCRD.LoadBalancer.ExternalTrafficPolicy, "Cluster")

// AWS or GCP config
result.WALES3Bucket = fromCRD.AWSGCP.WALES3Bucket
@ -28,6 +28,8 @@ type Resources struct {
PodLabelWaitTimeout time.Duration `name:"pod_label_wait_timeout" default:"10m"`
PodDeletionWaitTimeout time.Duration `name:"pod_deletion_wait_timeout" default:"10m"`
PodTerminateGracePeriod time.Duration `name:"pod_terminate_grace_period" default:"5m"`
SpiloRunAsUser *int64 `json:"spilo_runasuser,omitempty"`
SpiloRunAsGroup *int64 `json:"spilo_runasgroup,omitempty"`
SpiloFSGroup *int64 `name:"spilo_fsgroup"`
PodPriorityClassName string `name:"pod_priority_class_name"`
ClusterDomain string `name:"cluster_domain" default:"cluster.local"`

@ -145,8 +147,7 @@ type Config struct {
KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-12:1.6-p3"`
// deprecated in favour of SidecarContainers
SidecarImages map[string]string `name:"sidecar_docker_images"`
SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
SidecarContainers []v1.Container `name:"sidecars"`
PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
// value of this string must be valid JSON or YAML; see initPodServiceAccount
@ -175,8 +176,8 @@ type Config struct {
EnablePodAntiAffinity bool `name:"enable_pod_antiaffinity" default:"false"`
PodAntiAffinityTopologyKey string `name:"pod_antiaffinity_topology_key" default:"kubernetes.io/hostname"`
StorageResizeMode string `name:"storage_resize_mode" default:"ebs"`
// deprecated and kept for backward compatibility
EnableLoadBalancer *bool `name:"enable_load_balancer"`
EnableLoadBalancer *bool `name:"enable_load_balancer"` // deprecated and kept for backward compatibility
ExternalTrafficPolicy string `name:"external_traffic_policy" default:"Cluster"`
MasterDNSNameFormat StringTemplate `name:"master_dns_name_format" default:"{cluster}.{team}.{hostedzone}"`
ReplicaDNSNameFormat StringTemplate `name:"replica_dns_name_format" default:"{cluster}-repl.{team}.{hostedzone}"`
PDBNameFormat StringTemplate `name:"pdb_name_format" default:"postgres-{cluster}-pdb"`