Merge branch 'master' into gh-pages

Felix Kunde 2021-08-27 15:24:29 +02:00
commit 191e7878b3
62 changed files with 2365 additions and 1419 deletions

View File

@ -9,7 +9,7 @@ assignees: ''
Please, answer some short questions which should help us to understand your problem / question better?
-- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.3
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.7.0
- **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
- **Are you running Postgres Operator in production?** [yes | no]
- **Type of issue?** [Bug report, question, feature request, etc.]

View File

@ -66,7 +66,7 @@ We introduce the major version into the backup path to smoothen the [major versi
The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for wal segments in the current path but also old format paths.
This comes at potential performance costs and should be disabled after a few days.
-The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p7`
+The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.1-p1`
The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
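
For illustration only (not part of this diff), the compatibility flag mentioned above would be set through the operator configuration, e.g. via the chart's `configGeneral` section shown later in this commit; it is meant to be switched off again once all clusters run on the new WAL path:

```yaml
# values.yaml sketch - temporarily let Spilo also look up WAL files under the old path layout
configGeneral:
  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
  enable_spilo_wal_path_compat: true   # disable again after a few days
```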

View File

@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator-ui
-version: 1.6.3
+version: 1.7.0
-appVersion: 1.6.3
+appVersion: 1.7.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
keywords:

View File

@ -1,10 +1,34 @@
apiVersion: v1
entries:
postgres-operator-ui:
- apiVersion: v1
appVersion: 1.7.0
created: "2021-08-27T10:23:17.723412079+02:00"
description: Postgres Operator UI provides a graphical interface for a convenient
database-as-a-service user experience
digest: ad08ee5fe31bb2e7c3cc1299c2e778511a3c05305bc17357404b2615b32ea92a
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- ui
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
name: postgres-operator-ui
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-ui-1.7.0.tgz
version: 1.7.0
- apiVersion: v1
appVersion: 1.6.3
-created: "2021-05-27T19:04:33.425637932+02:00"
+created: "2021-08-27T10:23:17.722255571+02:00"
-description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+description: Postgres Operator UI provides a graphical interface for a convenient
+database-as-a-service user experience
digest: 08b810aa632dcc719e4785ef184e391267f7c460caa99677f2d00719075aac78
home: https://github.com/zalando/postgres-operator
keywords:
@ -25,8 +49,9 @@ entries:
version: 1.6.3
- apiVersion: v1
appVersion: 1.6.2
-created: "2021-05-27T19:04:33.422124263+02:00"
+created: "2021-08-27T10:23:17.721712848+02:00"
-description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+description: Postgres Operator UI provides a graphical interface for a convenient
+database-as-a-service user experience
digest: 14d1559bb0bd1e1e828f2daaaa6f6ac9ffc268d79824592c3589b55dd39241f6
home: https://github.com/zalando/postgres-operator
keywords:
@ -47,8 +72,9 @@ entries:
version: 1.6.2
- apiVersion: v1
appVersion: 1.6.1
-created: "2021-05-27T19:04:33.419640902+02:00"
+created: "2021-08-27T10:23:17.721175629+02:00"
-description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+description: Postgres Operator UI provides a graphical interface for a convenient
+database-as-a-service user experience
digest: 3d321352f2f1e7bb7450aa8876e3d818aa9f9da9bd4250507386f0490f2c1969
home: https://github.com/zalando/postgres-operator
keywords:
@ -69,8 +95,9 @@ entries:
version: 1.6.1
- apiVersion: v1
appVersion: 1.6.0
-created: "2021-05-27T19:04:33.41788193+02:00"
+created: "2021-08-27T10:23:17.720655498+02:00"
-description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+description: Postgres Operator UI provides a graphical interface for a convenient
+database-as-a-service user experience
digest: 1e0aa1e7db3c1daa96927ffbf6fdbcdb434562f961833cb5241ddbe132220ee4
home: https://github.com/zalando/postgres-operator
keywords:
@ -91,8 +118,9 @@ entries:
version: 1.6.0
- apiVersion: v1
appVersion: 1.5.0
-created: "2021-05-27T19:04:33.416056821+02:00"
+created: "2021-08-27T10:23:17.720112359+02:00"
-description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+description: Postgres Operator UI provides a graphical interface for a convenient
+database-as-a-service user experience
digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61
home: https://github.com/zalando/postgres-operator
keywords:
@ -111,4 +139,4 @@ entries:
urls:
- postgres-operator-ui-1.5.0.tgz
version: 1.5.0
-generated: "2021-05-27T19:04:33.41380858+02:00"
+generated: "2021-08-27T10:23:17.719397521+02:00"

View File

@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator-ui.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:
@ -43,13 +44,13 @@ spec:
- name: "APP_URL" - name: "APP_URL"
value: "http://localhost:8081" value: "http://localhost:8081"
- name: "OPERATOR_API_URL" - name: "OPERATOR_API_URL"
value: {{ .Values.envs.operatorApiUrl }} value: {{ .Values.envs.operatorApiUrl | quote }}
- name: "OPERATOR_CLUSTER_NAME_LABEL" - name: "OPERATOR_CLUSTER_NAME_LABEL"
value: {{ .Values.envs.operatorClusterNameLabel }} value: {{ .Values.envs.operatorClusterNameLabel | quote }}
- name: "RESOURCES_VISIBLE" - name: "RESOURCES_VISIBLE"
value: "{{ .Values.envs.resourcesVisible }}" value: {{ .Values.envs.resourcesVisible | quote }}
- name: "TARGET_NAMESPACE" - name: "TARGET_NAMESPACE"
value: "{{ .Values.envs.targetNamespace }}" value: {{ .Values.envs.targetNamespace | quote }}
- name: "TEAMS" - name: "TEAMS"
value: |- value: |-
[ [
@ -75,3 +76,6 @@ spec:
"11" "11"
] ]
} }
{{- if .Values.extraEnvs }}
{{- .Values.extraEnvs | toYaml | nindent 12 }}
{{- end }}

View File

@ -1,7 +1,10 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "postgres-operator-ui.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
@ -9,6 +12,7 @@ apiVersion: extensions/v1beta1
kind: Ingress
metadata:
name: {{ $fullName }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
@ -36,9 +40,18 @@ spec:
paths:
{{- range .paths }}
- path: {{ . }}
{{ if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion -}}
pathType: ImplementationSpecific
backend:
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else -}}
backend:
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end -}}
{{- end }}
{{- end }}
{{- end }}
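
As a hedged illustration of the template change above: on Kubernetes 1.19 or newer the chart now renders a `networking.k8s.io/v1` Ingress roughly like the following (host, namespace, and port values are made up for the example):

```yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: postgres-operator-ui
  namespace: default
spec:
  rules:
    - host: ui.example.org          # illustrative host
      http:
        paths:
          - path: /
            pathType: ImplementationSpecific
            backend:
              service:
                name: postgres-operator-ui
                port:
                  number: 80        # assumed service port
```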

View File

@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator-ui.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
ports:
- port: {{ .Values.service.port }}

View File

@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "postgres-operator-ui.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}

View File

@ -8,7 +8,7 @@ replicaCount: 1
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator-ui
-tag: v1.6.3
+tag: v1.7.0
pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets.
@ -48,6 +48,36 @@ envs:
teams:
- "acid"
# configure extra UI ENVs
# Extra ENVs are written in Kubernetes format and added "as is" to the pod's env variables
# https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
# UI specific env variables can be found here: https://github.com/zalando/postgres-operator/blob/master/ui/operator_ui/main.py
extraEnvs:
[]
# Example of settings to make the snapshot view work in the UI when using AWS
# - name: WALE_S3_ENDPOINT
# value: https+path://s3.us-east-1.amazonaws.com:443
# - name: SPILO_S3_BACKUP_PREFIX
# value: spilo/
# - name: AWS_ACCESS_KEY_ID
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_ACCESS_KEY_ID
# - name: AWS_SECRET_ACCESS_KEY
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_SECRET_ACCESS_KEY
# - name: AWS_DEFAULT_REGION
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_DEFAULT_REGION
# - name: SPILO_S3_BACKUP_BUCKET
# value: <s3 bucket used by the operator>
# configure UI service
service:
type: "ClusterIP"
@ -59,7 +89,8 @@ service:
# configure UI ingress. If needed: "enabled: true"
ingress:
enabled: false
-annotations: {}
+annotations:
{}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:

View File

@ -1,7 +1,7 @@
apiVersion: v1
name: postgres-operator
-version: 1.6.3
+version: 1.7.0
-appVersion: 1.6.3
+appVersion: 1.7.0
home: https://github.com/zalando/postgres-operator
description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
keywords:

View File

@ -65,7 +65,7 @@ spec:
properties:
docker_image:
type: string
-default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p7"
+default: "registry.opensource.zalan.do/acid/spilo-13:2.1-p1"
enable_crd_validation:
type: boolean
default: true
@ -173,6 +173,9 @@ spec:
enable_init_containers:
type: boolean
default: true
enable_cross_namespace_secret:
type: boolean
default: false
enable_pod_antiaffinity:
type: boolean
default: false
@ -392,12 +395,14 @@ spec:
type: string
wal_s3_bucket:
type: string
wal_az_storage_account:
type: string
logical_backup:
type: object
properties:
logical_backup_docker_image:
type: string
-default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+default: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
logical_backup_google_application_credentials:
type: string
logical_backup_job_prefix:
@ -532,7 +537,7 @@ spec:
default: "pooler" default: "pooler"
connection_pooler_image: connection_pooler_image:
type: string type: string
default: "registry.opensource.zalan.do/acid/pgbouncer:master-16" default: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
connection_pooler_max_db_connections: connection_pooler_max_db_connections:
type: integer type: integer
default: 60 default: 60

View File

@ -223,6 +223,97 @@ spec:
items:
type: string
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
numberOfInstances:
type: integer
minimum: 0
@ -303,6 +394,8 @@ spec:
type: boolean
defaultRoles:
type: boolean
secretNamespace:
type: string
replicaLoadBalancer: # deprecated
type: boolean
resources:
@ -396,97 +489,6 @@ spec:
type: string
caSecretName:
type: string
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
tolerations:
type: array
items:
@ -559,6 +561,24 @@ spec:
properties:
iops:
type: integer
selector:
type: object
properties:
matchExpressions:
type: array
items:
type: object
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchLabels:
type: object
size:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
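
For context (not part of the chart), the `selector` block added to the volume schema above could be used in a cluster manifest roughly as follows; the label key and values are purely illustrative:

```yaml
# Hypothetical postgresql manifest excerpt using the new volume selector
spec:
  volume:
    size: 10Gi
    selector:
      matchLabels:
        volume-tier: fast        # illustrative label
      matchExpressions:
        - key: environment
          operator: In
          values:
            - production
```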

View File

@ -1,10 +1,33 @@
apiVersion: v1
entries:
postgres-operator:
- apiVersion: v1
appVersion: 1.7.0
created: "2021-08-27T10:21:42.643185124+02:00"
description: Postgres Operator creates and manages PostgreSQL clusters running
in Kubernetes
digest: 1c4a1d289188ef72e409892fd2b86c008a37420af04a9796a8829ff84ab09e61
home: https://github.com/zalando/postgres-operator
keywords:
- postgres
- operator
- cloud-native
- patroni
- spilo
maintainers:
- email: opensource@zalando.de
name: Zalando
name: postgres-operator
sources:
- https://github.com/zalando/postgres-operator
urls:
- postgres-operator-1.7.0.tgz
version: 1.7.0
- apiVersion: v1
appVersion: 1.6.3
-created: "2021-05-27T19:04:25.199523943+02:00"
+created: "2021-08-27T10:21:42.640069574+02:00"
-description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+description: Postgres Operator creates and manages PostgreSQL clusters running
+in Kubernetes
digest: ea08f991bf23c9ad114bca98ebcbe3e2fa15beab163061399394905eaee89b35
home: https://github.com/zalando/postgres-operator
keywords:
@ -24,8 +47,9 @@ entries:
version: 1.6.3
- apiVersion: v1
appVersion: 1.6.2
-created: "2021-05-27T19:04:25.198182197+02:00"
+created: "2021-08-27T10:21:42.638502739+02:00"
-description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+description: Postgres Operator creates and manages PostgreSQL clusters running
+in Kubernetes
digest: d886f8a0879ca07d1e5246ee7bc55710e1c872f3977280fe495db6fc2057a7f4
home: https://github.com/zalando/postgres-operator
keywords:
@ -45,8 +69,9 @@ entries:
version: 1.6.2
- apiVersion: v1
appVersion: 1.6.1
-created: "2021-05-27T19:04:25.19687586+02:00"
+created: "2021-08-27T10:21:42.636936467+02:00"
-description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+description: Postgres Operator creates and manages PostgreSQL clusters running
+in Kubernetes
digest: 4ba5972cd486dcaa2d11c5613a6f97f6b7b831822e610fe9e10a57ea1db23556
home: https://github.com/zalando/postgres-operator
keywords:
@ -66,8 +91,9 @@ entries:
version: 1.6.1
- apiVersion: v1
appVersion: 1.6.0
-created: "2021-05-27T19:04:25.195600766+02:00"
+created: "2021-08-27T10:21:42.63533527+02:00"
-description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+description: Postgres Operator creates and manages PostgreSQL clusters running
+in Kubernetes
digest: f52149718ea364f46b4b9eec9a65f6253ad182bb78df541d14cd5277b9c8a8c3
home: https://github.com/zalando/postgres-operator
keywords:
@ -87,8 +113,9 @@ entries:
version: 1.6.0
- apiVersion: v1
appVersion: 1.5.0
-created: "2021-05-27T19:04:25.193985032+02:00"
+created: "2021-08-27T10:21:42.632932257+02:00"
-description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+description: Postgres Operator creates and manages PostgreSQL clusters running
+in Kubernetes
digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531
home: https://github.com/zalando/postgres-operator
keywords:
@ -106,4 +133,4 @@ entries:
urls:
- postgres-operator-1.5.0.tgz
version: 1.5.0
-generated: "2021-05-27T19:04:25.191897769+02:00"
+generated: "2021-08-27T10:21:42.631372502+02:00"

Binary file not shown.

View File

@ -51,3 +51,24 @@ Create chart name and version as used by the chart label.
{{- define "postgres-operator.chart" -}} {{- define "postgres-operator.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}} {{- end -}}
{{/*
Flatten nested config options when ConfigMap is used as ConfigTarget
*/}}
{{- define "flattenValuesForConfigMap" }}
{{- range $key, $value := . }}
{{- if or (kindIs "string" $value) (kindIs "int" $value) }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if kindIs "slice" $value }}
{{ $key }}: {{ join "," $value | quote }}
{{- end }}
{{- if kindIs "map" $value }}
{{- $list := list }}
{{- range $subKey, $subValue := $value }}
{{- $list = append $list (printf "%s:%s" $subKey $subValue) }}
{{ $key }}: {{ join "," $list | quote }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}
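
To make the new helper concrete, here is a sketch (not part of the commit) of how a few representative chart values would be flattened for the ConfigMap target: scalars are quoted, lists are joined with commas, and maps become `key:value` pairs:

```yaml
# chart values (input)
configGeneral:
  workers: 8
configKubernetes:
  cluster_labels:
    application: spilo
configTeamsApi:
  postgres_superuser_teams:
    - postgres_superusers

# resulting ConfigMap data (output)
workers: "8"
cluster_labels: "application:spilo"
postgres_superuser_teams: "postgres_superusers"
```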

View File

@ -3,6 +3,7 @@ apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}
@ -13,16 +14,16 @@ data:
pod_priority_class_name: {{ .Values.podPriorityClassName }}
{{- end }}
pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
-{{ toYaml .Values.configGeneral | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configGeneral | indent 2 }}
-{{ toYaml .Values.configUsers | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configUsers | indent 2 }}
-{{ toYaml .Values.configMajorVersionUpgrade | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configMajorVersionUpgrade | indent 2 }}
-{{ toYaml .Values.configKubernetes | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configKubernetes | indent 2 }}
-{{ toYaml .Values.configTimeouts | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configTimeouts | indent 2 }}
-{{ toYaml .Values.configLoadBalancer | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configLoadBalancer | indent 2 }}
-{{ toYaml .Values.configAwsOrGcp | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configAwsOrGcp | indent 2 }}
-{{ toYaml .Values.configLogicalBackup | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configLogicalBackup | indent 2 }}
-{{ toYaml .Values.configDebug | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configDebug | indent 2 }}
-{{ toYaml .Values.configLoggingRestApi | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configLoggingRestApi | indent 2 }}
-{{ toYaml .Values.configTeamsApi | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configTeamsApi | indent 2 }}
-{{ toYaml .Values.configConnectionPooler | indent 2 }}
+{{- include "flattenValuesForConfigMap" .Values.configConnectionPooler | indent 2 }}
{{- end }}

View File

@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
replicas: 1
selector:

View File

@ -3,6 +3,7 @@ apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}

View File

@ -9,6 +9,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ .Values.podPriorityClassName }}
namespace: {{ .Release.Namespace }}
preemptionPolicy: PreemptLowerPriority
globalDefault: false
value: 1000000

View File

@ -7,6 +7,7 @@ metadata:
app.kubernetes.io/managed-by: {{ .Release.Service }}
app.kubernetes.io/instance: {{ .Release.Name }}
name: {{ template "postgres-operator.fullname" . }}
namespace: {{ .Release.Namespace }}
spec:
type: ClusterIP
ports:

View File

@ -3,6 +3,7 @@ apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "postgres-operator.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels:
app.kubernetes.io/name: {{ template "postgres-operator.name" . }}
helm.sh/chart: {{ template "postgres-operator.chart" . }}

View File

@ -1,403 +0,0 @@
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator
tag: v1.6.3
pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
# - name: myRegistryKeySecretName
podAnnotations: {}
podLabels: {}
configTarget: "OperatorConfigurationCRD"
# general top-level configuration parameters
configGeneral:
# choose if deployment creates/updates CRDs with OpenAPIV3Validation
enable_crd_validation: true
# update only the statefulsets without immediately doing the rolling update
enable_lazy_spilo_upgrade: false
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
enable_pgversion_env_var: true
# start any new database pod without limitations on shm memory
enable_shm_volume: true
# enables backwards compatible path between Spilo 12 and Spilo 13 images
enable_spilo_wal_path_compat: false
# etcd connection string for Patroni. Empty uses K8s-native DCS.
etcd_host: ""
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
# kubernetes_use_configmaps: false
# Spilo docker image
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
# min number of instances in Postgres cluster. -1 = no limit
min_instances: -1
# max number of instances in Postgres cluster. -1 = no limit
max_instances: -1
# period between consecutive repair requests
repair_period: 5m
# period between consecutive sync requests
resync_period: 30m
# can prevent certain cases of memory overcommitment
# set_memory_request_to_limit: false
# map of sidecar names to docker images
# sidecar_docker_images
# example: "exampleimage:exampletag"
# number of routines the operator spawns to process requests concurrently
workers: 8
# parameters describing Postgres users
configUsers:
# postgres username used for replication between instances
replication_username: standby
# postgres superuser name to be created by initdb
super_username: postgres
configMajorVersionUpgrade:
# "off": no upgrade, "manual": manifest triggers action, "full": minimal version violation triggers too
major_version_upgrade_mode: "off"
# minimal Postgres major version that will not automatically be upgraded
minimal_major_version: "9.5"
# target Postgres major version when upgrading clusters automatically
target_major_version: "13"
configKubernetes:
# list of additional capabilities for postgres container
# additional_pod_capabilities:
# - "SYS_NICE"
# default DNS domain of K8s cluster where operator is running
cluster_domain: cluster.local
# additional labels assigned to the cluster objects
cluster_labels:
application: spilo
# label assigned to Kubernetes objects created by the operator
cluster_name_label: cluster-name
# additional annotations to add to every database pod
# custom_pod_annotations:
# keya: valuea
# keyb: valueb
# key name for annotation that compares manifest value with current date
# delete_annotation_date_key: "delete-date"
# key name for annotation that compares manifest value with cluster name
# delete_annotation_name_key: "delete-clustername"
# list of annotations propagated from cluster manifest to statefulset and deployment
# downscaler_annotations:
# - deployment-time
# - downscaler/*
# enables initContainers to run actions before Spilo is started
enable_init_containers: true
# toggles pod anti affinity on the Postgres pods
enable_pod_antiaffinity: false
# toggles PDB to set to MinAvailabe 0 or 1
enable_pod_disruption_budget: true
# enables sidecar containers to run alongside Spilo in the same pod
enable_sidecars: true
# namespaced name of the secret containing infrastructure roles names and passwords
# infrastructure_roles_secret_name: postgresql-infrastructure-roles
# list of annotation keys that can be inherited from the cluster manifest
# inherited_annotations:
# - owned-by
# list of label keys that can be inherited from the cluster manifest
# inherited_labels:
# - application
# - environment
# timeout for successful migration of master pods from unschedulable node
# master_pod_move_timeout: 20m
# set of labels that a running and active node should possess to be considered ready
# node_readiness_label:
# status: ready
# namespaced name of the secret containing the OAuth2 token to pass to the teams API
# oauth_token_secret_name: postgresql-operator
# defines the template for PDB (Pod Disruption Budget) names
pdb_name_format: "postgres-{cluster}-pdb"
# override topology key for pod anti affinity
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
# namespaced name of the ConfigMap with environment variables to populate on every pod
# pod_environment_configmap: "default/my-custom-config"
# name of the Secret (in cluster namespace) with environment variables to populate on every pod
# pod_environment_secret: "my-custom-secret"
# specify the pod management policy of stateful sets of Postgres clusters
pod_management_policy: "ordered_ready"
# label assigned to the Postgres pods (and services/endpoints)
pod_role_label: spilo-role
# service account definition as JSON/YAML string to be used by postgres cluster pods
# pod_service_account_definition: ""
# role binding definition as JSON/YAML string to be used by pod service account
# pod_service_account_role_binding_definition: ""
# Postgres pods are terminated forcefully after this timeout
pod_terminate_grace_period: 5m
# template for database user secrets generated by the operator
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# set user and group for the spilo container (required to run Spilo as non-root process)
# spilo_runasuser: "101"
# spilo_runasgroup: "103"
# group ID with write-access to volumes (required to run Spilo as non-root process)
# spilo_fsgroup: 103
# whether the Spilo container should run in privileged mode
spilo_privileged: false
# whether the Spilo container should run with additional permissions other than parent.
# required by cron which needs setuid
spilo_allow_privilege_escalation: true
# storage resize strategy, available options are: ebs, pvc, off
storage_resize_mode: pvc
# operator watches for postgres objects in the given namespace
watched_namespace: "*" # listen to all namespaces
# configure resource requests for the Postgres pods
configPostgresPodResources:
# CPU limits for the postgres containers
default_cpu_limit: "1"
# CPU request value for the postgres containers
default_cpu_request: 100m
# memory limits for the postgres containers
default_memory_limit: 500Mi
# memory request value for the postgres containers
default_memory_request: 100Mi
# hard CPU minimum required to properly run a Postgres cluster
min_cpu_limit: 250m
# hard memory minimum required to properly run a Postgres cluster
min_memory_limit: 250Mi
# timeouts related to some operator actions
configTimeouts:
# timeout when waiting for the Postgres pods to be deleted
pod_deletion_wait_timeout: 10m
# timeout when waiting for pod role and cluster labels
pod_label_wait_timeout: 10m
# interval between consecutive attempts waiting for postgresql CRD to be created
ready_wait_interval: 3s
# timeout for the complete postgres CRD creation
ready_wait_timeout: 30s
# interval to wait between consecutive attempts to check for some K8s resources
resource_check_interval: 3s
# timeout when waiting for the presence of a certain K8s resource (e.g. Sts, PDB)
resource_check_timeout: 10m
# configure behavior of load balancers
configLoadBalancer:
# DNS zone for cluster DNS name when load balancer is configured for cluster
db_hosted_zone: db.example.com
# annotations to apply to service when load balancing is enabled
# custom_service_annotations:
# keyx: valuez
# keya: valuea
# toggles service type load balancer pointing to the master pod of the cluster
enable_master_load_balancer: false
# toggles service type load balancer pointing to the replica pod of the cluster
enable_replica_load_balancer: false
# define external traffic policy for the load balancer
external_traffic_policy: "Cluster"
# defines the DNS name string template for the master load balancer cluster
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
# defines the DNS name string template for the replica load balancer cluster
replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
# options to aid debugging of the operator itself
configDebug:
# toggles verbose debug logs from the operator
debug_logging: true
# toggles operator functionality that require access to the postgres database
enable_database_access: true
# parameters affecting logging and REST API listener
configLoggingRestApi:
# REST API listener listens to this port
api_port: 8080
# number of entries in the cluster history ring buffer
cluster_history_entries: 1000
# number of lines in the ring buffer used to store cluster logs
ring_log_lines: 100
# configure interaction with non-Kubernetes objects from AWS or GCP
configAwsOrGcp:
# Additional Secret (aws or gcp credentials) to mount in the pod
# additional_secret_mount: "some-secret-name"
# Path to mount the above Secret in the filesystem of the container(s)
# additional_secret_mount_path: "/some/dir"
# AWS region used to store ESB volumes
aws_region: eu-central-1
# enable automatic migration on AWS from gp2 to gp3 volumes
enable_ebs_gp3_migration: false
# defines maximum volume size in GB until which auto migration happens
# enable_ebs_gp3_migration_max_size: 1000
# GCP credentials that will be used by the operator / pods
# gcp_credentials: ""
# AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
# kube_iam_role: ""
# S3 bucket to use for shipping postgres daily logs
# log_s3_bucket: ""
# GCS bucket to use for shipping WAL segments with WAL-E
# wal_gs_bucket: ""
# S3 bucket to use for shipping WAL segments with WAL-E
# wal_s3_bucket: ""
# configure K8s cron job managed by the operator
configLogicalBackup:
# image for pods of the logical backup job (example runs pg_dumpall)
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
# path of google cloud service account json file
# logical_backup_google_application_credentials: ""
# prefix for the backup job name
logical_backup_job_prefix: "logical-backup-"
# storage provider - either "s3" or "gcs"
logical_backup_provider: "s3"
# S3 Access Key ID
logical_backup_s3_access_key_id: ""
# S3 bucket to store backup results
logical_backup_s3_bucket: "my-bucket-url"
# S3 region of bucket
logical_backup_s3_region: ""
# S3 endpoint url when not using AWS
logical_backup_s3_endpoint: ""
# S3 Secret Access Key
logical_backup_s3_secret_access_key: ""
# S3 server side encryption
logical_backup_s3_sse: "AES256"
# backup schedule in the cron format
logical_backup_schedule: "30 00 * * *"
# automate creation of human users with teams API service
configTeamsApi:
# team_admin_role will have the rights to grant roles coming from PG manifests
enable_admin_role_for_users: true
# operator watches for PostgresTeam CRs to assign additional teams and members to clusters
enable_postgres_team_crd: false
# toogle to create additional superuser teams from PostgresTeam CRs
enable_postgres_team_crd_superusers: false
# toggle to automatically rename roles of former team members and deny LOGIN
enable_team_member_deprecation: false
# toggle to grant superuser to team members created from the Teams API
enable_team_superuser: false
# toggles usage of the Teams API by the operator
enable_teams_api: false
# should contain a URL to use for authentication (username and token)
# pam_configuration: ""
# operator will add all team member roles to this group and add a pg_hba line
pam_role_name: zalandos
# List of teams which members need the superuser role in each Postgres cluster
postgres_superuser_teams:
- postgres_superusers
# List of roles that cannot be overwritten by an application, team or infrastructure role
protected_role_names:
- admin
# Suffix to add if members are removed from TeamsAPI or PostgresTeam CRD
role_deletion_suffix: "_deleted"
# role name to grant to team members created from the Teams API
team_admin_role: admin
# postgres config parameters to apply to each team member role
team_api_role_configuration:
log_statement: all
# URL of the Teams API service
# teams_api_url: http://fake-teams-api.default.svc.cluster.local
configConnectionPooler:
# db schema to install lookup function into
connection_pooler_schema: "pooler"
# db user for pooler to use
connection_pooler_user: "pooler"
# docker image
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
# max db connections the pooler should hold
connection_pooler_max_db_connections: 60
# default pooling mode
connection_pooler_mode: "transaction"
# number of pooler instances
connection_pooler_number_of_instances: 2
# default resources
connection_pooler_default_cpu_request: 500m
connection_pooler_default_memory_request: 100Mi
connection_pooler_default_cpu_limit: "1"
connection_pooler_default_memory_limit: 100Mi
rbac:
# Specifies whether RBAC resources should be created
create: true
crd:
# Specifies whether custom resource definitions should be created
# When using helm3, this is ignored; instead use "--skip-crds" to skip.
create: true
serviceAccount:
# Specifies whether a ServiceAccount should be created
create: true
# The name of the ServiceAccount to use.
# If not set and create is true, a name is generated using the fullname template
name:
podServiceAccount:
# The name of the ServiceAccount to be used by postgres cluster pods
# If not set a name is generated using the fullname template and "-pod" suffix
name: "postgres-pod"
# priority class for operator pod
priorityClassName: ""
# priority class for database pods
podPriorityClassName: ""
resources:
limits:
cpu: 500m
memory: 500Mi
requests:
cpu: 100m
memory: 250Mi
securityContext:
runAsUser: 1000
runAsNonRoot: true
readOnlyRootFilesystem: true
allowPrivilegeEscalation: false
# Affinity for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
affinity: {}
# Node labels for pod assignment
# Ref: https://kubernetes.io/docs/user-guide/node-selection/
nodeSelector: {}
# Tolerations for pod assignment
# Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
tolerations: []
controllerID:
# Specifies whether a controller ID should be defined for the operator
# Note, all postgres manifest must then contain the following annotation to be found by this operator
# "acid.zalan.do/controller": <controller-ID-of-the-operator>
create: false
# The name of the controller ID to use.
# If not set and create is true, a name is generated using the fullname template
name:
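
Note for readers of this deletion: the separate values-crd.yaml is gone, and the remaining values.yaml (updated further down in this commit) now defaults to `configTarget: "OperatorConfigurationCRD"`. A minimal, hypothetical override to keep the previous ConfigMap-based configuration would be:

```yaml
# my-values.yaml (sketch) - opt back into the ConfigMap-based configuration
configTarget: "ConfigMap"
```

passed e.g. with `helm install postgres-operator ./charts/postgres-operator -f my-values.yaml` (chart path assumed from the repository layout).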

View File

@ -1,19 +1,19 @@
image:
registry: registry.opensource.zalan.do
repository: acid/postgres-operator
-tag: v1.6.3
+tag: v1.7.0
pullPolicy: "IfNotPresent"
# Optionally specify an array of imagePullSecrets.
# Secrets must be manually created in the namespace.
# ref: https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
# imagePullSecrets:
# - name: myRegistryKeySecretName
podAnnotations: {}
podLabels: {}
-configTarget: "ConfigMap"
+configTarget: "OperatorConfigurationCRD"
# JSON logging format
enableJsonLogging: false
@ -21,37 +21,38 @@ enableJsonLogging: false
# general configuration parameters
configGeneral:
# choose if deployment creates/updates CRDs with OpenAPIV3Validation
-enable_crd_validation: "true"
+enable_crd_validation: true
# update only the statefulsets without immediately doing the rolling update
-enable_lazy_spilo_upgrade: "false"
+enable_lazy_spilo_upgrade: false
# set the PGVERSION env var instead of providing the version via postgresql.bin_dir in SPILO_CONFIGURATION
-enable_pgversion_env_var: "true"
+enable_pgversion_env_var: true
# start any new database pod without limitations on shm memory
-enable_shm_volume: "true"
+enable_shm_volume: true
# enables backwards compatible path between Spilo 12 and Spilo 13 images
-enable_spilo_wal_path_compat: "false"
+enable_spilo_wal_path_compat: false
# etcd connection string for Patroni. Empty uses K8s-native DCS.
etcd_host: ""
# Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
-# kubernetes_use_configmaps: "false"
+# kubernetes_use_configmaps: false
# Spilo docker image
-docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
+docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
# min number of instances in Postgres cluster. -1 = no limit
-min_instances: "-1"
+min_instances: -1
# max number of instances in Postgres cluster. -1 = no limit
-max_instances: "-1"
+max_instances: -1
# period between consecutive repair requests
repair_period: 5m
# period between consecutive sync requests
resync_period: 30m
# can prevent certain cases of memory overcommitment
-# set_memory_request_to_limit: "false"
+# set_memory_request_to_limit: false
# map of sidecar names to docker images
-# sidecar_docker_images: ""
+# sidecar_docker_images:
# example: "exampleimage:exampletag"
# number of routines the operator spawns to process requests concurrently
-workers: "8"
+workers: 8
# parameters describing Postgres users
configUsers:
@ -70,16 +71,20 @@ configMajorVersionUpgrade:
configKubernetes:
# list of additional capabilities for postgres container
-# additional_pod_capabilities: "SYS_NICE"
+# additional_pod_capabilities:
# - "SYS_NICE"
# default DNS domain of K8s cluster where operator is running
cluster_domain: cluster.local
# additional labels assigned to the cluster objects
-cluster_labels: application:spilo
+cluster_labels:
application: spilo
# label assigned to Kubernetes objects created by the operator
cluster_name_label: cluster-name
-# annotations attached to each database pod
+# additional annotations to add to every database pod
-# custom_pod_annotations: "keya:valuea,keyb:valueb"
+# custom_pod_annotations:
# keya: valuea
# keyb: valueb
# key name for annotation that compares manifest value with current date
# delete_annotation_date_key: "delete-date"
@ -88,30 +93,38 @@ configKubernetes:
# delete_annotation_name_key: "delete-clustername"
# list of annotations propagated from cluster manifest to statefulset and deployment
-# downscaler_annotations: "deployment-time,downscaler/*"
+# downscaler_annotations:
# - deployment-time
# - downscaler/*
# allow user secrets in other namespaces than the Postgres cluster
enable_cross_namespace_secret: false
# enables initContainers to run actions before Spilo is started
-enable_init_containers: "true"
+enable_init_containers: true
# toggles pod anti affinity on the Postgres pods
-enable_pod_antiaffinity: "false"
+enable_pod_antiaffinity: false
# toggles PDB to set to MinAvailabe 0 or 1
-enable_pod_disruption_budget: "true"
+enable_pod_disruption_budget: true
# enables sidecar containers to run alongside Spilo in the same pod
-enable_sidecars: "true"
+enable_sidecars: true
# namespaced name of the secret containing infrastructure roles names and passwords
# infrastructure_roles_secret_name: postgresql-infrastructure-roles
# list of annotation keys that can be inherited from the cluster manifest
-# inherited_annotations: owned-by
+# inherited_annotations:
# - owned-by
# list of label keys that can be inherited from the cluster manifest
-# inherited_labels: application,environment
+# inherited_labels:
# - application
# - environment
# timeout for successful migration of master pods from unschedulable node
# master_pod_move_timeout: 20m
# set of labels that a running and active node should possess to be considered ready
-# node_readiness_label: ""
+# node_readiness_label:
# status: ready
# namespaced name of the secret containing the OAuth2 token to pass to the teams API
# oauth_token_secret_name: postgresql-operator
@ -137,19 +150,22 @@ configKubernetes:
# Postgres pods are terminated forcefully after this timeout
pod_terminate_grace_period: 5m
-# template for database user secrets generated by the operator
+# template for database user secrets generated by the operator,
# here username contains the namespace in the format namespace.username
# if the user is in different namespace than cluster and cross namespace secrets
# are enabled via `enable_cross_namespace_secret` flag in the configuration.
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
# set user and group for the spilo container (required to run Spilo as non-root process)
-# spilo_runasuser: "101"
+# spilo_runasuser: 101
-# spilo_runasgroup: "103"
+# spilo_runasgroup: 103
# group ID with write-access to volumes (required to run Spilo as non-root process)
-# spilo_fsgroup: "103"
+# spilo_fsgroup: 103
# whether the Spilo container should run in privileged mode
-spilo_privileged: "false"
+spilo_privileged: false
# whether the Spilo container should run with additional permissions other than parent.
# required by cron which needs setuid
-spilo_allow_privilege_escalation: "true"
+spilo_allow_privilege_escalation: true
# storage resize strategy, available options are: ebs, pvc, off
storage_resize_mode: pvc
# operator watches for postgres objects in the given namespace
@ -190,34 +206,36 @@ configLoadBalancer:
# DNS zone for cluster DNS name when load balancer is configured for cluster
db_hosted_zone: db.example.com
# annotations to apply to service when load balancing is enabled
-# custom_service_annotations: "keyx:valuez,keya:valuea"
+# custom_service_annotations:
# keyx: valuez
# keya: valuea
# toggles service type load balancer pointing to the master pod of the cluster
-enable_master_load_balancer: "false"
+enable_master_load_balancer: false
# toggles service type load balancer pointing to the replica pod of the cluster
-enable_replica_load_balancer: "false"
+enable_replica_load_balancer: false
# define external traffic policy for the load balancer
external_traffic_policy: "Cluster"
# defines the DNS name string template for the master load balancer cluster
-master_dns_name_format: '{cluster}.{team}.{hostedzone}'
+master_dns_name_format: "{cluster}.{team}.{hostedzone}"
# defines the DNS name string template for the replica load balancer cluster
-replica_dns_name_format: '{cluster}-repl.{team}.{hostedzone}'
+replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
# options to aid debugging of the operator itself
configDebug:
# toggles verbose debug logs from the operator
-debug_logging: "true"
+debug_logging: true
# toggles operator functionality that require access to the postgres database
-enable_database_access: "true"
+enable_database_access: true
# parameters affecting logging and REST API listener
configLoggingRestApi:
# REST API listener listens to this port
-api_port: "8080"
+api_port: 8080
# number of entries in the cluster history ring buffer
-cluster_history_entries: "1000"
+cluster_history_entries: 1000
# number of lines in the ring buffer used to store cluster logs
-ring_log_lines: "100"
+ring_log_lines: 100
# configure interaction with non-Kubernetes objects from AWS or GCP
configAwsOrGcp:
@ -231,11 +249,11 @@ configAwsOrGcp:
aws_region: eu-central-1
# enable automatic migration on AWS from gp2 to gp3 volumes
-enable_ebs_gp3_migration: "false"
+enable_ebs_gp3_migration: false
# defines maximum volume size in GB until which auto migration happens
-# enable_ebs_gp3_migration_max_size: "1000"
+# enable_ebs_gp3_migration_max_size: 1000
-# GCP credentials for setting the GOOGLE_APPLICATION_CREDNETIALS environment variable
+# GCP credentials that will be used by the operator / pods
# gcp_credentials: ""
# AWS IAM role to supply in the iam.amazonaws.com/role annotation of Postgres pods
@ -250,10 +268,13 @@ configAwsOrGcp:
# GCS bucket to use for shipping WAL segments with WAL-E # GCS bucket to use for shipping WAL segments with WAL-E
# wal_gs_bucket: "" # wal_gs_bucket: ""
# Azure Storage Account to use for shipping WAL segments with WAL-G
# wal_az_storage_account: ""
# configure K8s cron job managed by the operator # configure K8s cron job managed by the operator
configLogicalBackup: configLogicalBackup:
# image for pods of the logical backup job (example runs pg_dumpall) # image for pods of the logical backup job (example runs pg_dumpall)
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3" logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
# path of google cloud service account json file # path of google cloud service account json file
# logical_backup_google_application_credentials: "" # logical_backup_google_application_credentials: ""
@ -265,10 +286,10 @@ configLogicalBackup:
logical_backup_s3_access_key_id: "" logical_backup_s3_access_key_id: ""
# S3 bucket to store backup results # S3 bucket to store backup results
logical_backup_s3_bucket: "my-bucket-url" logical_backup_s3_bucket: "my-bucket-url"
# S3 endpoint url when not using AWS
logical_backup_s3_endpoint: ""
# S3 region of bucket # S3 region of bucket
logical_backup_s3_region: "" logical_backup_s3_region: ""
# S3 endpoint url when not using AWS
logical_backup_s3_endpoint: ""
# S3 Secret Access Key # S3 Secret Access Key
logical_backup_s3_secret_access_key: "" logical_backup_s3_secret_access_key: ""
# S3 server side encryption # S3 server side encryption
@ -276,36 +297,38 @@ configLogicalBackup:
# backup schedule in the cron format # backup schedule in the cron format
logical_backup_schedule: "30 00 * * *" logical_backup_schedule: "30 00 * * *"
# automate creation of human users with teams API service # automate creation of human users with teams API service
configTeamsApi: configTeamsApi:
# team_admin_role will have the rights to grant roles coming from PG manifests # team_admin_role will have the rights to grant roles coming from PG manifests
enable_admin_role_for_users: "true" enable_admin_role_for_users: true
# operator watches for PostgresTeam CRs to assign additional teams and members to clusters # operator watches for PostgresTeam CRs to assign additional teams and members to clusters
enable_postgres_team_crd: "false" enable_postgres_team_crd: false
# toggle to create additional superuser teams from PostgresTeam CRs # toggle to create additional superuser teams from PostgresTeam CRs
enable_postgres_team_crd_superusers: "false" enable_postgres_team_crd_superusers: false
# toggle to automatically rename roles of former team members and deny LOGIN # toggle to automatically rename roles of former team members and deny LOGIN
enable_team_member_deprecation: "false" enable_team_member_deprecation: false
# toggle to grant superuser to team members created from the Teams API # toggle to grant superuser to team members created from the Teams API
enable_team_superuser: "false" enable_team_superuser: false
# toggles usage of the Teams API by the operator # toggles usage of the Teams API by the operator
enable_teams_api: "false" enable_teams_api: false
# should contain a URL to use for authentication (username and token) # should contain a URL to use for authentication (username and token)
# pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees # pam_configuration: https://info.example.com/oauth2/tokeninfo?access_token= uid realm=/employees
# operator will add all team member roles to this group and add a pg_hba line # operator will add all team member roles to this group and add a pg_hba line
pam_role_name: "zalandos" pam_role_name: zalandos
# List of teams which members need the superuser role in each Postgres cluster # List of teams which members need the superuser role in each Postgres cluster
postgres_superuser_teams: "postgres_superusers" postgres_superuser_teams:
- postgres_superusers
# List of roles that cannot be overwritten by an application, team or infrastructure role # List of roles that cannot be overwritten by an application, team or infrastructure role
protected_role_names: "admin" protected_role_names:
- admin
# Suffix to add if members are removed from TeamsAPI or PostgresTeam CRD # Suffix to add if members are removed from TeamsAPI or PostgresTeam CRD
role_deletion_suffix: "_deleted" role_deletion_suffix: "_deleted"
# role name to grant to team members created from the Teams API # role name to grant to team members created from the Teams API
team_admin_role: "admin" team_admin_role: admin
# postgres config parameters to apply to each team member role # postgres config parameters to apply to each team member role
team_api_role_configuration: "log_statement:all" team_api_role_configuration:
log_statement: all
# URL of the Teams API service # URL of the Teams API service
# teams_api_url: http://fake-teams-api.default.svc.cluster.local # teams_api_url: http://fake-teams-api.default.svc.cluster.local
@ -316,13 +339,13 @@ configConnectionPooler:
# db user for pooler to use # db user for pooler to use
connection_pooler_user: "pooler" connection_pooler_user: "pooler"
# docker image # docker image
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16" connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
# max db connections the pooler should hold # max db connections the pooler should hold
connection_pooler_max_db_connections: "60" connection_pooler_max_db_connections: 60
# default pooling mode # default pooling mode
connection_pooler_mode: "transaction" connection_pooler_mode: "transaction"
# number of pooler instances # number of pooler instances
connection_pooler_number_of_instances: "2" connection_pooler_number_of_instances: 2
# default resources # default resources
connection_pooler_default_cpu_request: 500m connection_pooler_default_cpu_request: 500m
connection_pooler_default_memory_request: 100Mi connection_pooler_default_memory_request: 100Mi

View File

@ -3,6 +3,21 @@
Learn how to configure and manage the Postgres Operator in your Kubernetes (K8s) Learn how to configure and manage the Postgres Operator in your Kubernetes (K8s)
environment. environment.
## Upgrading the operator
The Postgres Operator is upgraded by changing the Docker image within the
deployment. Before doing so, it is recommended to check the release notes
for new configuration options or changed behavior you might want to reflect
in the ConfigMap or config CRD. For example, a new feature might be introduced
that is enabled or disabled by default, and you may want to flip it with the
corresponding flag option.
When using Helm, be aware that installing the new chart will not update the
`Postgresql` and `OperatorConfiguration` CRDs. Make sure to update them beforehand
with the provided manifests in the `crds` folder. Otherwise, you might face
errors about new Postgres manifest or configuration options being unknown
to the CRD schema validation.
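For instance, with a plain K8s deployment the upgrade boils down to applying the updated CRD manifests and bumping the image tag. A minimal sketch (the deployment name and tag are examples, adapt them to your setup):
```yaml
# sketch only: bump the operator image in its deployment after applying the new CRDs
apiVersion: apps/v1
kind: Deployment
metadata:
  name: postgres-operator
spec:
  template:
    spec:
      containers:
        - name: postgres-operator
          image: registry.opensource.zalan.do/acid/postgres-operator:v1.7.0
```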
## Minor and major version upgrade ## Minor and major version upgrade
Minor version upgrades for PostgreSQL are handled via updating the Spilo Docker Minor version upgrades for PostgreSQL are handled via updating the Spilo Docker
@ -157,16 +172,26 @@ from numerous escape characters in the latter log entry, view it in CLI with
`PodTemplate` used by the operator is yet to be updated with the default values `PodTemplate` used by the operator is yet to be updated with the default values
used internally in K8s. used internally in K8s.
The operator also support lazy updates of the Spilo image. That means the pod The StatefulSet is replaced if the following properties change:
template of a PG cluster's stateful set is updated immediately with the new - annotations
image, but no rolling update follows. This feature saves you a switchover - and - volumeClaimTemplates
hence downtime - when you know pods are re-started later anyway, for instance - template volumes
due to the node rotation. To force a rolling update, disable this mode by
setting the `enable_lazy_spilo_upgrade` to `false` in the operator configuration The StatefulSet is replaced and a rolling update is triggered if the following
and restart the operator pod. With the standard eager rolling updates the properties differ between the old and new state:
operator checks during Sync all pods run images specified in their respective - container name, ports, image, resources, env, envFrom, securityContext and volumeMounts
statefulsets. The operator triggers a rolling upgrade for PG clusters that - template labels, annotations, service account, securityContext, affinity, priority class and termination grace period
violate this condition.
Note that changes in the `SPILO_CONFIGURATION` env variable under `bootstrap.dcs`
path are ignored for the diff. They will be applied through Patroni's REST API
interface, following a restart of all instances.
The operator also supports lazy updates of the Spilo image. In this case the
StatefulSet is only updated, but no rolling update follows. This feature saves
you a switchover - and hence downtime - when you know pods are re-started later
anyway, for instance due to the node rotation. To force a rolling update,
disable this mode by setting the `enable_lazy_spilo_upgrade` to `false` in the
operator configuration and restart the operator pod.
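If you rely on the ConfigMap-based configuration, toggling the flag could look like the following sketch (only the relevant key is shown; the ConfigMap name is an example):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  # set to "false" to force eager rolling updates after image changes
  enable_lazy_spilo_upgrade: "false"
```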
## Delete protection via annotations ## Delete protection via annotations
@ -663,6 +688,12 @@ if it ends up in your specified WAL backup path:
envdir "/run/etc/wal-e.d/env" /scripts/postgres_backup.sh "/home/postgres/pgdata/pgroot/data" envdir "/run/etc/wal-e.d/env" /scripts/postgres_backup.sh "/home/postgres/pgdata/pgroot/data"
``` ```
You can also check if Spilo is able to find any backups:
```bash
envdir "/run/etc/wal-e.d/env" wal-g backup-list
```
Depending on the cloud storage provider different [environment variables](https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst) Depending on the cloud storage provider different [environment variables](https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst)
have to be set for Spilo. Not all of them are generated automatically by the have to be set for Spilo. Not all of them are generated automatically by the
operator by changing its configuration. In this case you have to use an operator by changing its configuration. In this case you have to use an
@ -730,8 +761,15 @@ WALE_S3_ENDPOINT='https+path://s3.eu-central-1.amazonaws.com:443'
WALE_S3_PREFIX=$WAL_S3_BUCKET/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION} WALE_S3_PREFIX=$WAL_S3_BUCKET/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}
``` ```
If the prefix is not specified Spilo will generate it from `WAL_S3_BUCKET`. The operator sets the prefix to an empty string so that Spilo will generate it
When the `AWS_REGION` is set `AWS_ENDPOINT` and `WALE_S3_ENDPOINT` are from the configured `WAL_S3_BUCKET`.
:warning: When you overwrite the configuration by defining `WAL_S3_BUCKET` in
the [pod_environment_configmap](#custom-pod-environment-variables) you have
to set `WAL_BUCKET_SCOPE_PREFIX = ""`, too. Otherwise Spilo will not find
the physical backups on restore (next chapter).
When the `AWS_REGION` is set, `AWS_ENDPOINT` and `WALE_S3_ENDPOINT` are
generated automatically. `WALG_S3_PREFIX` is identical to `WALE_S3_PREFIX`. generated automatically. `WALG_S3_PREFIX` is identical to `WALE_S3_PREFIX`.
`SCOPE` is the Postgres cluster name. `SCOPE` is the Postgres cluster name.
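As a hedged example, such an override via the pod environment ConfigMap might look as follows (the bucket name is illustrative):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-pod-config
data:
  WAL_S3_BUCKET: "my-custom-wal-bucket"
  # required so that Spilo can locate the physical backups again on restore
  WAL_BUCKET_SCOPE_PREFIX: ""
```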
@ -804,6 +842,63 @@ pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
... ...
``` ```
### Azure setup
To configure the operator on Azure these prerequisites are needed:
* A storage account in the same region as the Kubernetes cluster.
The configuration parameters that we will be using are:
* `pod_environment_secret`
* `wal_az_storage_account`
1. Generate the K8s secret resource that will contain your storage account's
access key. You will need a copy of this secret in every namespace in which you
want to create postgresql clusters.
The latest version of WAL-G (v1.0) supports the use of a SAS token, but you'll
have to make do with using the primary or secondary access key until the
version of WAL-G is updated in the postgres-operator.
```yaml
apiVersion: v1
kind: Secret
metadata:
name: psql-backup-creds
namespace: default
type: Opaque
stringData:
AZURE_STORAGE_ACCESS_KEY: <primary or secondary access key>
```
2. Set up a pod environment ConfigMap that instructs the operator to use WAL-G,
instead of WAL-E, for backup and restore.
```yml
apiVersion: v1
kind: ConfigMap
metadata:
name: pod-env-overrides
namespace: postgres-operator-system
data:
# Any env variable used by spilo can be added
USE_WALG_BACKUP: "true"
USE_WALG_RESTORE: "true"
CLONE_USE_WALG_RESTORE: "true"
```
3. Set up your operator configuration values. With the `psql-backup-creds`
and `pod-env-overrides` resources applied to your cluster, ensure that the operator's
configuration is set up like the following:
```yml
...
aws_or_gcp:
pod_environment_secret: "psql-backup-creds"
pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
wal_az_storage_account: "postgresbackupsbucket28302F2" # name of storage account to save the WAL-G logs
...
```
### Restoring physical backups ### Restoring physical backups
If cluster members have to be (re)initialized restoring physical backups If cluster members have to be (re)initialized restoring physical backups
@ -813,6 +908,36 @@ on one of the other running instances (preferably replicas if they do not lag
behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster) behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
clusters. clusters.
If you need to provide a [custom clone environment](#custom-pod-environment-variables)
copy existing variables about your setup (backup location, prefix, access
keys etc.) and prepend the `CLONE_` prefix to get them copied to the correct
directory within Spilo.
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: postgres-pod-config
data:
AWS_REGION: "eu-west-1"
AWS_ACCESS_KEY_ID: "****"
AWS_SECRET_ACCESS_KEY: "****"
...
CLONE_AWS_REGION: "eu-west-1"
CLONE_AWS_ACCESS_KEY_ID: "****"
CLONE_AWS_SECRET_ACCESS_KEY: "****"
...
```
### Standby clusters
The setup for [standby clusters](user.md#setting-up-a-standby-cluster) is very
similar to cloning. At the moment, the operator only allows for streaming from
the S3 WAL archive of the master specified in the manifest. Like with cloning,
if you are using [additional environment variables](#custom-pod-environment-variables)
to access your backup location you have to copy those variables and prepend the
`STANDBY_` prefix for Spilo to find the backups and WAL files to stream.
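A sketch of such a ConfigMap for a standby cluster streaming from S3 could look like this (the keys mirror the clone example above; all values are placeholders):
```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-pod-config
data:
  AWS_REGION: "eu-west-1"
  AWS_ACCESS_KEY_ID: "****"
  AWS_SECRET_ACCESS_KEY: "****"
  STANDBY_AWS_REGION: "eu-west-1"
  STANDBY_AWS_ACCESS_KEY_ID: "****"
  STANDBY_AWS_SECRET_ACCESS_KEY: "****"
```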
## Logical backups ## Logical backups
The operator can manage K8s cron jobs to run logical backups (SQL dumps) of The operator can manage K8s cron jobs to run logical backups (SQL dumps) of
@ -950,7 +1075,7 @@ make docker
# build in image in minikube docker env # build in image in minikube docker env
eval $(minikube docker-env) eval $(minikube docker-env)
docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.3 . docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.7.0 .
# apply UI manifests next to a running Postgres Operator # apply UI manifests next to a running Postgres Operator
kubectl apply -f manifests/ kubectl apply -f manifests/

View File

@ -314,13 +314,12 @@ Please, reflect your changes in tests, for example in:
For the CRD-based configuration, please update the following files: For the CRD-based configuration, please update the following files:
* the default [OperatorConfiguration](../manifests/postgresql-operator-default-configuration.yaml) * the default [OperatorConfiguration](../manifests/postgresql-operator-default-configuration.yaml)
* the Helm chart's [values-crd file](../charts/postgres-operator/values.yaml)
* the CRD's [validation](../manifests/operatorconfiguration.crd.yaml) * the CRD's [validation](../manifests/operatorconfiguration.crd.yaml)
* the CRD's validation in the [Helm chart](../charts/postgres-operator/crds/operatorconfigurations.yaml)
Reflect the changes in the ConfigMap configuration as well (note that numeric Add new options also to the Helm chart's [values file](../charts/postgres-operator/values.yaml).
and boolean parameters have to use double quotes here): It follows the OperatorConfiguration CRD layout. Nested values will be flattened for the ConfigMap.
* [ConfigMap](../manifests/configmap.yaml) manifest Last but not least, update the [ConfigMap](../manifests/configmap.yaml) manifest example as well.
* the Helm chart's default [values file](../charts/postgres-operator/values.yaml)
### Updating documentation ### Updating documentation

View File

@ -14,7 +14,7 @@ solutions:
* [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allow creating multi-node K8s * [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allow creating multi-node K8s
clusters running on Docker (requires Docker) clusters running on Docker (requires Docker)
To interact with the K8s infrastructure install it's CLI runtime [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-via-curl). To interact with the K8s infrastructure install its CLI runtime [kubectl](https://kubernetes.io/docs/tasks/tools/install-kubectl/#install-kubectl-binary-via-curl).
This quickstart assumes that you have started minikube or created a local kind This quickstart assumes that you have started minikube or created a local kind
cluster. Note that you can also use built-in K8s support in the Docker Desktop cluster. Note that you can also use built-in K8s support in the Docker Desktop
@ -81,16 +81,12 @@ the repo root. With Helm v3 installed you should be able to run:
helm install postgres-operator ./charts/postgres-operator helm install postgres-operator ./charts/postgres-operator
``` ```
To use CRD-based configuration you need to specify the [values-crd yaml file](../charts/postgres-operator/values-crd.yaml).
```bash
helm install postgres-operator ./charts/postgres-operator -f ./charts/postgres-operator/values-crd.yaml
```
The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will The chart works with both Helm 2 and Helm 3. The `crd-install` hook from v2 will
be skipped with warning when using v3. Documentation for installing applications be skipped with warning when using v3. Documentation for installing applications
with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/). with Helm 2 can be found in the [v2 docs](https://v2.helm.sh/docs/).
The chart is also hosted at: https://opensource.zalando.com/postgres-operator/charts/postgres-operator/
## Check if Postgres Operator is running ## Check if Postgres Operator is running
Starting the operator may take a few seconds. Check if the operator pod is Starting the operator may take a few seconds. Check if the operator pod is

View File

@ -109,7 +109,11 @@ These parameters are grouped directly under the `spec` key in the manifest.
`SUPERUSER`, `REPLICATION`, `INHERIT`, `LOGIN`, `NOLOGIN`, `CREATEROLE`, `SUPERUSER`, `REPLICATION`, `INHERIT`, `LOGIN`, `NOLOGIN`, `CREATEROLE`,
`CREATEDB`, `BYPASSRLS`. A login user is created by default unless NOLOGIN is `CREATEDB`, `BYPASSRLS`. A login user is created by default unless NOLOGIN is
specified, in which case the operator creates a role. One can specify empty specified, in which case the operator creates a role. One can specify empty
flags by providing a JSON empty array '*[]*'. Optional. flags by providing a JSON empty array '*[]*'. If the config option
`enable_cross_namespace_secrets` is enabled you can specify the namespace in
the user name in the form `{namespace}.{username}` and the operator will
create the K8s secret in that namespace. The part after the first `.` is
considered to be the user name. Optional.
* **databases** * **databases**
a map of database names to database owners for the databases that should be a map of database names to database owners for the databases that should be
@ -185,6 +189,35 @@ These parameters are grouped directly under the `spec` key in the manifest.
If you set the `all` special item, it will be mounted in all containers (postgres + sidecars). If you set the `all` special item, it will be mounted in all containers (postgres + sidecars).
Else you can set the list of target containers in which the additional volumes will be mounted (eg : postgres, telegraf) Else you can set the list of target containers in which the additional volumes will be mounted (eg : postgres, telegraf)
## Prepared Databases
The operator can create databases with default owner, reader and writer roles
without the need to specify them under `users` or `databases` sections. Those
parameters are grouped under the `preparedDatabases` top-level key. For more
information, see [user docs](../user.md#prepared-databases-with-roles-and-default-privileges).
* **defaultUsers**
The operator will always create default `NOLOGIN` roles for defined prepared
databases, but if `defaultUsers` is set to `true` three additional `LOGIN`
roles with `_user` suffix will get created. Default is `false`.
* **extensions**
map of extensions with target database schema that the operator will install
in the database. Optional.
* **schemas**
map of schemas that the operator will create. Optional - if no schema is
listed, the operator will create a schema called `data`. Under each schema
key, you can define whether `defaultRoles` (NOLOGIN) and `defaultUsers` (LOGIN)
roles with schema-exclusive privileges shall be created. Both flags are
set to `false` by default.
* **secretNamespace**
for each default LOGIN role the operator will create a secret. You can
specify the namespace in which these secrets will get created, if
`enable_cross_namespace_secrets` is set to `true` in the config. Otherwise,
the cluster namespace is used.
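A hedged example that combines these options (database, schema, extension and namespace names are purely illustrative):
```yaml
spec:
  preparedDatabases:
    foo:
      defaultUsers: true
      extensions:
        pg_partman: data
      schemas:
        data:
          defaultRoles: true
          defaultUsers: false
      secretNamespace: appspace
```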
## Postgres parameters ## Postgres parameters
Those parameters are grouped under the `postgresql` top-level key, which is Those parameters are grouped under the `postgresql` top-level key, which is
@ -258,7 +291,9 @@ explanation of `ttl` and `loop_wait` parameters.
Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/) Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
for the Postgres container. They are grouped under the `resources` top-level for the Postgres container. They are grouped under the `resources` top-level
key with subgroups `requests` and `limits`. key with subgroups `requests` and `limits`. The whole section is optional,
however, if you specify a request or limit you have to define everything
(unless you modify the default CRD schema validation).
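For example, a fully specified `resources` section might look like this (values are illustrative):
```yaml
spec:
  resources:
    requests:
      cpu: 100m
      memory: 100Mi
    limits:
      cpu: 500m
      memory: 500Mi
```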
### Requests ### Requests
@ -266,11 +301,11 @@ CPU and memory requests for the Postgres container.
* **cpu** * **cpu**
CPU requests for the Postgres container. Optional, overrides the CPU requests for the Postgres container. Optional, overrides the
`default_cpu_requests` operator configuration parameter. Optional. `default_cpu_requests` operator configuration parameter.
* **memory** * **memory**
memory requests for the Postgres container. Optional, overrides the memory requests for the Postgres container. Optional, overrides the
`default_memory_request` operator configuration parameter. Optional. `default_memory_request` operator configuration parameter.
### Limits ### Limits
@ -278,11 +313,11 @@ CPU and memory limits for the Postgres container.
* **cpu** * **cpu**
CPU limits for the Postgres container. Optional, overrides the CPU limits for the Postgres container. Optional, overrides the
`default_cpu_limits` operator configuration parameter. Optional. `default_cpu_limits` operator configuration parameter.
* **memory** * **memory**
memory limits for the Postgres container. Optional, overrides the memory limits for the Postgres container. Optional, overrides the
`default_memory_limits` operator configuration parameter. Optional. `default_memory_limits` operator configuration parameter.
## Parameters defining how to clone the cluster from another one ## Parameters defining how to clone the cluster from another one
@ -364,6 +399,11 @@ properties of the persistent storage that stores Postgres data.
When running the operator on AWS the latest generation of EBS volumes (`gp3`) When running the operator on AWS the latest generation of EBS volumes (`gp3`)
allows for configuring the throughput in MB/s. Maximum is 1000. Optional. allows for configuring the throughput in MB/s. Maximum is 1000. Optional.
* **selector**
A label query over PVs to consider for binding. See the [Kubernetes
documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
for details on using `matchLabels` and `matchExpressions`. Optional.
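A sketch of a `volume` section using the selector (label keys and values are assumptions for illustration):
```yaml
spec:
  volume:
    size: 10Gi
    selector:
      matchLabels:
        environment: dev
      matchExpressions:
        - key: flavour
          operator: In
          values:
            - postgres
```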
## Sidecar definitions ## Sidecar definitions
Those parameters are defined under the `sidecars` key. They consist of a list Those parameters are defined under the `sidecars` key. They consist of a list

View File

@ -264,6 +264,11 @@ configuration they are grouped under the `kubernetes` key.
[admin docs](../administrator.md#pod-disruption-budget) for more information. [admin docs](../administrator.md#pod-disruption-budget) for more information.
Default is true. Default is true.
* **enable_cross_namespace_secrets**
Allows creating secrets in a namespace other than the Postgres cluster
namespace. Once enabled, specify the namespace in the user name under the
`users` section in the form `{namespace}.{username}`. The default is `false`.
* **enable_init_containers** * **enable_init_containers**
global option to allow for creating init containers in the cluster manifest to global option to allow for creating init containers in the cluster manifest to
run actions before Spilo is started. Default is true. run actions before Spilo is started. Default is true.
@ -275,11 +280,13 @@ configuration they are grouped under the `kubernetes` key.
* **secret_name_template** * **secret_name_template**
a template for the name of the database user secrets generated by the a template for the name of the database user secrets generated by the
operator. `{username}` is replaced with name of the secret, `{cluster}` with operator. `{namespace}` is replaced with name of the namespace if
the name of the cluster, `{tprkind}` with the kind of CRD (formerly known as `enable_cross_namespace_secret` is set, otherwise the
TPR) and `{tprgroup}` with the group of the CRD. No other placeholders are secret is in cluster's namespace. `{username}` is replaced with name of the
allowed. The default is secret, `{cluster}` with the name of the cluster, `{tprkind}` with the kind
`{username}.{cluster}.credentials.{tprkind}.{tprgroup}`. of CRD (formerly known as TPR) and `{tprgroup}` with the group of the CRD.
No other placeholders are allowed. The default is
`{namespace}.{username}.{cluster}.credentials.{tprkind}.{tprgroup}`.
* **cluster_domain** * **cluster_domain**
defines the default DNS domain for the kubernetes cluster the operator is defines the default DNS domain for the kubernetes cluster the operator is
@ -550,6 +557,12 @@ yet officially supported.
[service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform). [service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform).
The default is empty The default is empty
* **wal_az_storage_account**
Azure Storage Account to use for shipping WAL segments with WAL-G. The
storage account must exist and be accessible by Postgres pods. Note, only the
name of the storage account is required.
The default is empty.
* **log_s3_bucket** * **log_s3_bucket**
S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS. S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS.
The bucket has to be present and accessible by Postgres pods. The default is The bucket has to be present and accessible by Postgres pods. The default is
@ -593,7 +606,7 @@ grouped under the `logical_backup` key.
runs `pg_dumpall` on a replica if possible and uploads compressed results to runs `pg_dumpall` on a replica if possible and uploads compressed results to
an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`. an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
The default image is the same image built with the Zalando-internal CI The default image is the same image built with the Zalando-internal CI
pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3" pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
* **logical_backup_google_application_credentials** * **logical_backup_google_application_credentials**
Specifies the path of the google cloud service account json file. Default is empty. Specifies the path of the google cloud service account json file. Default is empty.

View File

@ -139,6 +139,26 @@ secret, without ever sharing it outside of the cluster.
At the moment it is not possible to define membership of the manifest role in At the moment it is not possible to define membership of the manifest role in
other roles. other roles.
To define the secrets for the users in a different namespace than that of the
cluster, one can set `enable_cross_namespace_secret` and declare the namespace
for the secrets in the manifest in the following manner:
```yaml
spec:
users:
# users with secret in a different namespace
appspace.db_user:
- createdb
```
Here, anything before the first dot is considered the namespace and the text after
the first dot is the username. Also, the postgres roles of these usernames would
be in the form of `namespace.username`.
For such usernames, the secret is created in the given namespace and its name is
of the following form:
`{namespace}.{username}.{team}-{clustername}.credentials.postgresql.acid.zalan.do`
### Infrastructure roles ### Infrastructure roles
An infrastructure role is a role that should be present on every PostgreSQL An infrastructure role is a role that should be present on every PostgreSQL
@ -501,9 +521,10 @@ Then, the schemas are owned by the database owner, too.
The roles described in the previous paragraph can be granted to LOGIN roles from The roles described in the previous paragraph can be granted to LOGIN roles from
the `users` section in the manifest. Optionally, the Postgres Operator can also the `users` section in the manifest. Optionally, the Postgres Operator can also
create default LOGIN roles for the database an each schema individually. These create default LOGIN roles for the database and each schema individually. These
roles will get the `_user` suffix and they inherit all rights from their NOLOGIN roles will get the `_user` suffix and they inherit all rights from their NOLOGIN
counterparts. counterparts. Therefore, you cannot have `defaultRoles` set to `false` and enable
`defaultUsers` at the same time.
| Role name | Member of | Admin | | Role name | Member of | Admin |
| ------------------- | -------------- | ------------- | | ------------------- | -------------- | ------------- |
@ -526,6 +547,23 @@ spec:
defaultUsers: true defaultUsers: true
``` ```
Default access privileges are also defined for LOGIN roles on database and
schema creation. This means they are currently not set when `defaultUsers`
(or `defaultRoles` for schemas) are enabled at a later point in time.
For all LOGIN roles the operator will create K8s secrets in the namespace
specified in `secretNamespace`, if `enable_cross_namespace_secret` is set to
`true` in the config. Otherwise, they are created in the same namespace as
the Postgres cluster.
```yaml
spec:
preparedDatabases:
foo:
defaultUsers: true
secretNamespace: appspace
```
### Schema `search_path` for default roles ### Schema `search_path` for default roles
The schema [`search_path`](https://www.postgresql.org/docs/13/ddl-schemas.html#DDL-SCHEMAS-PATH) The schema [`search_path`](https://www.postgresql.org/docs/13/ddl-schemas.html#DDL-SCHEMAS-PATH)
@ -695,20 +733,21 @@ spec:
uid: "efd12e58-5786-11e8-b5a7-06148230260c" uid: "efd12e58-5786-11e8-b5a7-06148230260c"
cluster: "acid-batman" cluster: "acid-batman"
timestamp: "2017-12-19T12:40:33+01:00" timestamp: "2017-12-19T12:40:33+01:00"
s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
``` ```
Here `cluster` is a name of a source cluster that is going to be cloned. A new Here `cluster` is a name of a source cluster that is going to be cloned. A new
cluster will be cloned from S3, using the latest backup before the `timestamp`. cluster will be cloned from S3, using the latest backup before the `timestamp`.
Note, that a time zone is required for `timestamp` in the format of +00:00 which Note, that a time zone is required for `timestamp` in the format of +00:00 which
is UTC. The `uid` field is also mandatory. The operator will use it to find a is UTC. You can specify the `s3_wal_path` of the source cluster or let the
correct key inside an S3 bucket. You can find this field in the metadata of the operator try to find it based on the configured `wal_[s3|gs]_bucket` and the
source cluster: specified `uid`. You can find the UID of the source cluster in its metadata:
```yaml ```yaml
apiVersion: acid.zalan.do/v1 apiVersion: acid.zalan.do/v1
kind: postgresql kind: postgresql
metadata: metadata:
name: acid-test-cluster name: acid-batman
uid: efd12e58-5786-11e8-b5a7-06148230260c uid: efd12e58-5786-11e8-b5a7-06148230260c
``` ```
@ -761,7 +800,7 @@ no statefulset will be created.
```yaml ```yaml
spec: spec:
standby: standby:
s3_wal_path: "s3 bucket path to the master" s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
``` ```
At the moment, the operator only allows to stream from the WAL archive of the At the moment, the operator only allows to stream from the WAL archive of the

View File

@ -156,6 +156,10 @@ class K8s:
while not get_services(): while not get_services():
time.sleep(self.RETRY_TIMEOUT_SEC) time.sleep(self.RETRY_TIMEOUT_SEC)
def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
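# count pods that still carry the operator's rolling-update-required annotation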
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
def count_pods_with_label(self, labels, namespace='default'): def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
@ -189,6 +193,7 @@ class K8s:
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over' pod_phase = 'Failing over'
new_pod_node = '' new_pod_node = ''
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
while (pod_phase != 'Running') or (new_pod_node not in failover_targets): while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
@ -197,6 +202,20 @@ class K8s:
pod_phase = pods[0].status.phase pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC) time.sleep(self.RETRY_TIMEOUT_SEC)
while pods_with_update_flag != 0:
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
time.sleep(self.RETRY_TIMEOUT_SEC)
def wait_for_namespace_creation(self, namespace='default'):
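# block until a namespace with the given name exists in the cluster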
ns_found = False
while not ns_found:
ns = self.api.core_v1.list_namespace().items
for n in ns:
if n.metadata.name == namespace:
ns_found = True
break
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_logical_backup_job(self, namespace='default'): def get_logical_backup_job(self, namespace='default'):
return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
@ -233,6 +252,13 @@ class K8s:
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
def patroni_rest(self, pod, path):
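# query the given path on Patroni's REST API (port 8008) inside the pod via kubectl exec; returns parsed JSON or None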
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None
return json.loads(r.stdout.decode())
def get_patroni_state(self, pod): def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json") r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[": if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":
@ -413,6 +439,10 @@ class K8sBase:
while not get_services(): while not get_services():
time.sleep(self.RETRY_TIMEOUT_SEC) time.sleep(self.RETRY_TIMEOUT_SEC)
def count_pods_with_rolling_update_flag(self, labels, namespace='default'):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
return len(list(filter(lambda x: "zalando-postgres-operator-rolling-update-required" in x.metadata.annotations, pods)))
def count_pods_with_label(self, labels, namespace='default'): def count_pods_with_label(self, labels, namespace='default'):
return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items) return len(self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items)
@ -446,6 +476,7 @@ class K8sBase:
def wait_for_pod_failover(self, failover_targets, labels, namespace='default'): def wait_for_pod_failover(self, failover_targets, labels, namespace='default'):
pod_phase = 'Failing over' pod_phase = 'Failing over'
new_pod_node = '' new_pod_node = ''
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
while (pod_phase != 'Running') or (new_pod_node not in failover_targets): while (pod_phase != 'Running') or (new_pod_node not in failover_targets):
pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items pods = self.api.core_v1.list_namespaced_pod(namespace, label_selector=labels).items
@ -454,6 +485,10 @@ class K8sBase:
pod_phase = pods[0].status.phase pod_phase = pods[0].status.phase
time.sleep(self.RETRY_TIMEOUT_SEC) time.sleep(self.RETRY_TIMEOUT_SEC)
while pods_with_update_flag != 0:
pods_with_update_flag = self.count_pods_with_rolling_update_flag(labels, namespace)
time.sleep(self.RETRY_TIMEOUT_SEC)
def get_logical_backup_job(self, namespace='default'): def get_logical_backup_job(self, namespace='default'):
return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo") return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
@ -486,6 +521,13 @@ class K8sBase:
stdout=subprocess.PIPE, stdout=subprocess.PIPE,
stderr=subprocess.PIPE) stderr=subprocess.PIPE)
def patroni_rest(self, pod, path):
r = self.exec_with_kubectl(pod, "curl localhost:8008/" + path)
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "{":
return None
return json.loads(r.stdout.decode())
def get_patroni_state(self, pod): def get_patroni_state(self, pod):
r = self.exec_with_kubectl(pod, "patronictl list -f json") r = self.exec_with_kubectl(pod, "patronictl list -f json")
if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[": if not r.returncode == 0 or not r.stdout.decode()[0:1] == "[":

View File

@ -290,38 +290,39 @@ class EndToEndTestCase(unittest.TestCase):
"Operator does not get in sync") "Operator does not get in sync")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_overwrite_pooler_deployment(self): def test_cross_namespace_secrets(self):
self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml") '''
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") Test secrets in different namespace
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1, '''
"Initial broken deployment not rolled out") k8s = self.k8s
# enable secret creation in separate namespace
patch_cross_namespace_secret = {
"data": {
"enable_cross_namespace_secret": "true"
}
}
self.k8s.update_config(patch_cross_namespace_secret,
step="cross namespace secrets enabled")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
"Operator does not get in sync")
# create secret in test namespace
self.k8s.api.custom_objects_api.patch_namespaced_custom_object( self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default', 'acid.zalan.do', 'v1', 'default',
'postgresqls', 'acid-minimal-cluster', 'postgresqls', 'acid-minimal-cluster',
{ {
'spec': { 'spec': {
'enableConnectionPooler': True 'users':{
'test.db_user': [],
}
} }
}) })
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"},
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2, "Operator does not get in sync")
"Operator did not succeed in overwriting labels") self.eventuallyEqual(lambda: self.k8s.count_secrets_with_label("cluster-name=acid-minimal-cluster,application=spilo", self.test_namespace),
1, "Secret not created for user in namespace")
self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableConnectionPooler': False
}
})
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
0, "Pooler pods not scaled down")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_enable_disable_connection_pooler(self): def test_enable_disable_connection_pooler(self):
@ -568,6 +569,7 @@ class EndToEndTestCase(unittest.TestCase):
role.pop("Password", None) role.pop("Password", None)
self.assertDictEqual(role, { self.assertDictEqual(role, {
"Name": "robot_zmon_acid_monitoring_new", "Name": "robot_zmon_acid_monitoring_new",
"Namespace":"",
"Flags": None, "Flags": None,
"MemberOf": ["robot_zmon"], "MemberOf": ["robot_zmon"],
"Parameters": None, "Parameters": None,
@ -753,6 +755,33 @@ class EndToEndTestCase(unittest.TestCase):
# ensure cluster is healthy after tests # ensure cluster is healthy after tests
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running") self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
@unittest.skip("Skipping this test until fixed")
def test_major_version_upgrade(self):
k8s = self.k8s
result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml")
self.eventuallyEqual(lambda: k8s.count_running_pods(labels="application=spilo,cluster-name=acid-upgrade-test"), 2, "No 2 pods running")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
pg_patch_version = {
"spec": {
"postgres": {
"version": "13"
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
def check_version_13():
p = k8s.get_patroni_state("acid-upgrade-test-0")
version = p["server_version"][0:2]
return version
self.eventuallyEqual(check_version_13, "13", "Version was not upgraded to 13")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_min_resource_limits(self): def test_min_resource_limits(self):
''' '''
@ -823,6 +852,7 @@ class EndToEndTestCase(unittest.TestCase):
try: try:
k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml") k8s.create_with_kubectl("manifests/complete-postgres-manifest.yaml")
k8s.wait_for_pod_start("spilo-role=master", self.test_namespace) k8s.wait_for_pod_start("spilo-role=master", self.test_namespace)
k8s.wait_for_pod_start("spilo-role=replica", self.test_namespace)
self.assert_master_is_unique(self.test_namespace, "acid-test-cluster") self.assert_master_is_unique(self.test_namespace, "acid-test-cluster")
except timeout_decorator.TimeoutError: except timeout_decorator.TimeoutError:
@ -836,360 +866,6 @@ class EndToEndTestCase(unittest.TestCase):
"acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster") "acid.zalan.do", "v1", self.test_namespace, "postgresqls", "acid-test-cluster")
time.sleep(5) time.sleep(5)
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_rolling_update_flag(self):
'''
Add rolling update flag to only the master and see it failing over
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
# verify we are in good state from potential previous tests
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
# get node and replica (expected target of new master)
_, replica_nodes = k8s.get_pg_nodes(cluster_label)
# rolling update annotation
flag = {
"metadata": {
"annotations": {
"zalando-postgres-operator-rolling-update-required": "true",
}
}
}
try:
podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
for pod in podsList.items:
# add flag only to the master to make it appear to the operator as a leftover from a rolling update
if pod.metadata.labels.get('spilo-role') == 'master':
old_creation_timestamp = pod.metadata.creation_timestamp
k8s.patch_pod(flag, pod.metadata.name, pod.metadata.namespace)
else:
# remember replica name to check if operator does a switchover
switchover_target = pod.metadata.name
# do not wait until the next sync
k8s.delete_operator_pod()
# operator should now recreate the master pod and do a switchover before
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
# check if the former replica is now the new master
leader = k8s.get_cluster_leader_pod()
self.eventuallyEqual(lambda: leader.metadata.name, switchover_target, "Rolling update flag did not trigger switchover")
# check that the old master has been recreated
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
replica = k8s.get_cluster_replica_pod()
self.assertTrue(replica.metadata.creation_timestamp > old_creation_timestamp, "Old master pod was not recreated")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_rolling_update_label_timeout(self):
'''
Simulate case when replica does not receive label in time and rolling update does not finish
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
flag = "zalando-postgres-operator-rolling-update-required"
# verify we are in good state from potential previous tests
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
# get node and replica (expected target of new master)
_, replica_nodes = k8s.get_pg_nodes(cluster_label)
# rolling update annotation
rolling_update_patch = {
"metadata": {
"annotations": {
flag: "true",
}
}
}
# make pod_label_wait_timeout so short that rolling update fails on first try
# temporarily lower resync interval to reduce waiting for further tests
# pods should get healthy in the meantime
patch_resync_config = {
"data": {
"pod_label_wait_timeout": "2s",
"resync_period": "20s",
}
}
try:
# patch both pods for rolling update
podList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
for pod in podList.items:
k8s.patch_pod(rolling_update_patch, pod.metadata.name, pod.metadata.namespace)
if pod.metadata.labels.get('spilo-role') == 'replica':
switchover_target = pod.metadata.name
# update config and restart operator
k8s.update_config(patch_resync_config, "update resync interval and pod_label_wait_timeout")
# operator should now recreate the replica pod first and do a switchover after
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
# pod_label_wait_timeout should have been exceeded hence the rolling update is continued on next sync
# check if the cluster state is "SyncFailed"
self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail")
# wait for next sync, replica should be running normally by now and be ready for switchover
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
# check if the former replica is now the new master
leader = k8s.get_cluster_leader_pod()
self.eventuallyEqual(lambda: leader.metadata.name, switchover_target, "Rolling update flag did not trigger switchover")
# wait for the old master to get restarted
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
# status should again be "SyncFailed" but turn into "Running" on the next sync
time.sleep(10)
self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Expected running cluster after two syncs")
# revert config changes
patch_resync_config = {
"data": {
"pod_label_wait_timeout": "10m",
"resync_period": "30m",
}
}
k8s.update_config(patch_resync_config, "revert resync interval and pod_label_wait_timeout")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_zz_node_readiness_label(self):
'''
Remove node readiness label from master node. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
readiness_label = 'lifecycle-status'
readiness_value = 'ready'
try:
# get nodes of master and replica(s) (expected target of new master)
current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
num_replicas = len(current_replica_nodes)
failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)
# add node_readiness_label to potential failover nodes
patch_readiness_label = {
"metadata": {
"labels": {
readiness_label: readiness_value
}
}
}
self.assertTrue(len(failover_targets) > 0, "No failover targets available")
for failover_target in failover_targets:
k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)
# define node_readiness_label in config map which should trigger a failover of the master
patch_readiness_label_config = {
"data": {
"node_readiness_label": readiness_label + ':' + readiness_value,
}
}
k8s.update_config(patch_readiness_label_config, "setting readiness label")
new_master_node, new_replica_nodes = self.assert_failover(
current_master_node, num_replicas, failover_targets, cluster_label)
# patch also node where master ran before
k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
# toggle pod anti affinity to move replica away from master node
self.eventuallyTrue(lambda: self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label), "Pods are redistributed")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
'''
Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
'''
k8s = self.k8s
pod = "acid-minimal-cluster-0"
k8s.scale_cluster(3)
self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 nodes healthy")
k8s.scale_cluster(2)
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all members 2 healthy")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_service_annotations(self):
'''
Create a Postgres cluster with service annotations and check them.
'''
k8s = self.k8s
patch_custom_service_annotations = {
"data": {
"custom_service_annotations": "foo:bar",
}
}
k8s.update_config(patch_custom_service_annotations)
pg_patch_custom_annotations = {
"spec": {
"serviceAnnotations": {
"annotation.key": "value",
"alice": "bob",
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)
annotations = {
"annotation.key": "value",
"foo": "bar",
"alice": "bob"
}
self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations")
self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations")
# clean up
unpatch_custom_service_annotations = {
"data": {
"custom_service_annotations": "",
}
}
k8s.update_config(unpatch_custom_service_annotations)
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_statefulset_annotation_propagation(self):
'''
Inject annotation to Postgresql CRD and check it's propagation to stateful set
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
patch_sset_propagate_annotations = {
"data": {
"downscaler_annotations": "deployment-time,downscaler/*",
"inherited_annotations": "owned-by",
}
}
k8s.update_config(patch_sset_propagate_annotations)
pg_crd_annotations = {
"metadata": {
"annotations": {
"deployment-time": "2020-04-30 12:00:00",
"downscaler/downtime_replicas": "0",
"owned-by": "acid",
},
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
annotations = {
"deployment-time": "2020-04-30 12:00:00",
"downscaler/downtime_replicas": "0",
"owned-by": "acid",
}
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
@unittest.skip("Skipping this test until fixed")
def test_zaa_test_major_version_upgrade(self):
k8s = self.k8s
result = k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml")
self.eventuallyEqual(lambda: k8s.count_running_pods(labels="application=spilo,cluster-name=acid-upgrade-test"), 2, "No 2 pods running")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
pg_patch_version = {
"spec": {
"postgres": {
"version": "13"
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
def check_version_13():
p = k8s.get_patroni_state("acid-upgrade-test-0")
version = p["server_version"][0:2]
return version
self.eventuallyEqual(check_version_13, "13", "Version was not upgraded to 13")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
@unittest.skip("Skipping this test until fixed")
def test_zzz_taint_based_eviction(self):
'''
Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
# verify we are in good state from potential previous tests
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
# get nodes of master and replica(s) (expected target of new master)
master_nodes, replica_nodes = k8s.get_cluster_nodes()
self.assertNotEqual(master_nodes, [])
self.assertNotEqual(replica_nodes, [])
# taint node with postgres=:NoExecute to force failover
body = {
"spec": {
"taints": [
{
"effect": "NoExecute",
"key": "postgres"
}
]
}
}
k8s.api.core_v1.patch_node(master_nodes[0], body)
self.eventuallyTrue(lambda: k8s.get_cluster_nodes()[0], replica_nodes)
self.assertNotEqual(lambda: k8s.get_cluster_nodes()[0], master_nodes)
# add toleration to pods
patch_toleration_config = {
"data": {
"toleration": "key:postgres,operator:Exists,effect:NoExecute"
}
}
k8s.update_config(patch_toleration_config, step="allow tainted nodes")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
# toggle pod anti affinity to move replica away from master node
nm, new_replica_nodes = k8s.get_cluster_nodes()
new_master_node = nm[0]
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_node_affinity(self): def test_node_affinity(self):
''' '''
@ -1299,7 +975,449 @@ class EndToEndTestCase(unittest.TestCase):
raise raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_zzzz_cluster_deletion(self): def test_node_readiness_label(self):
'''
Remove node readiness label from master node. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
readiness_label = 'lifecycle-status'
readiness_value = 'ready'
try:
# get nodes of master and replica(s) (expected target of new master)
current_master_node, current_replica_nodes = k8s.get_pg_nodes(cluster_label)
num_replicas = len(current_replica_nodes)
failover_targets = self.get_failover_targets(current_master_node, current_replica_nodes)
# add node_readiness_label to potential failover nodes
patch_readiness_label = {
"metadata": {
"labels": {
readiness_label: readiness_value
}
}
}
self.assertTrue(len(failover_targets) > 0, "No failover targets available")
for failover_target in failover_targets:
k8s.api.core_v1.patch_node(failover_target, patch_readiness_label)
# define node_readiness_label in config map which should trigger a failover of the master
patch_readiness_label_config = {
"data": {
"node_readiness_label": readiness_label + ':' + readiness_value,
}
}
k8s.update_config(patch_readiness_label_config, "setting readiness label")
new_master_node, new_replica_nodes = self.assert_failover(
current_master_node, num_replicas, failover_targets, cluster_label)
# patch also node where master ran before
k8s.api.core_v1.patch_node(current_master_node, patch_readiness_label)
# toggle pod anti affinity to move replica away from master node
self.eventuallyTrue(lambda: self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label), "Pods are redistributed")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_overwrite_pooler_deployment(self):
self.k8s.create_with_kubectl("manifests/minimal-fake-pooler-deployment.yaml")
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 1,
"Initial broken deployment not rolled out")
self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableConnectionPooler': True
}
})
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: self.k8s.get_deployment_replica_count(name="acid-minimal-cluster-pooler"), 2,
"Operator did not succeed in overwriting labels")
self.k8s.api.custom_objects_api.patch_namespaced_custom_object(
'acid.zalan.do', 'v1', 'default',
'postgresqls', 'acid-minimal-cluster',
{
'spec': {
'enableConnectionPooler': False
}
})
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: self.k8s.count_running_pods("connection-pooler=acid-minimal-cluster-pooler"),
0, "Pooler pods not scaled down")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_patroni_config_update(self):
'''
Change the Postgres config under Spec.Postgresql.Parameters and Spec.Patroni
and query Patroni's config endpoint to check whether the manifest changes were
applied by restarting the cluster through Patroni's REST API
'''
k8s = self.k8s
masterPod = k8s.get_cluster_leader_pod()
labels = 'application=spilo,cluster-name=acid-minimal-cluster,spilo-role=master'
creationTimestamp = masterPod.metadata.creation_timestamp
new_max_connections_value = "50"
# adjust max_connection
pg_patch_config = {
"spec": {
"postgresql": {
"parameters": {
"max_connections": new_max_connections_value
}
},
"patroni": {
"slots": {
"test_slot": {
"type": "physical"
}
},
"ttl": 29,
"loop_wait": 9,
"retry_timeout": 9,
"synchronous_mode": True
}
}
}
try:
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_config)
self.eventuallyEqual(lambda: self.k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
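# helper: fetch the effective configuration from Patroni's /config endpoint
# on the master pod and compare it with the patched manifest values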
def compare_config():
effective_config = k8s.patroni_rest(masterPod.metadata.name, "config")
desired_patroni = pg_patch_config["spec"]["patroni"]
desired_parameters = pg_patch_config["spec"]["postgresql"]["parameters"]
effective_parameters = effective_config["postgresql"]["parameters"]
self.assertEqual(desired_parameters["max_connections"], effective_parameters["max_connections"],
"max_connections not updated")
self.assertTrue(effective_config["slots"] is not None, "physical replication slot not added")
self.assertEqual(desired_patroni["ttl"], effective_config["ttl"],
"ttl not updated")
self.assertEqual(desired_patroni["loop_wait"], effective_config["loop_wait"],
"loop_wait not updated")
self.assertEqual(desired_patroni["retry_timeout"], effective_config["retry_timeout"],
"retry_timeout not updated")
self.assertEqual(desired_patroni["synchronous_mode"], effective_config["synchronous_mode"],
"synchronous_mode not updated")
return True
self.eventuallyTrue(compare_config, "Postgres config not applied")
setting_query = """
SELECT setting
FROM pg_settings
WHERE name = 'max_connections';
"""
self.eventuallyEqual(lambda: self.query_database(masterPod.metadata.name, "postgres", setting_query)[0], new_max_connections_value,
"New max_connections setting not applied", 10, 5)
# make sure that pod wasn't recreated
self.assertEqual(creationTimestamp, masterPod.metadata.creation_timestamp,
"Master pod creation timestamp is updated")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
# make sure cluster is in a good state for further tests
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2,
"No 2 pods running")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_rolling_update_flag(self):
'''
Add the rolling update flag to only the master and check that it triggers a switchover
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
# verify we are in good state from potential previous tests
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
# get node and replica (expected target of new master)
_, replica_nodes = k8s.get_pg_nodes(cluster_label)
# rolling update annotation
flag = {
"metadata": {
"annotations": {
"zalando-postgres-operator-rolling-update-required": "true",
}
}
}
try:
podsList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
for pod in podsList.items:
# add flag only to the master to make it appear to the operator as a leftover from a rolling update
if pod.metadata.labels.get('spilo-role') == 'master':
old_creation_timestamp = pod.metadata.creation_timestamp
k8s.patch_pod(flag, pod.metadata.name, pod.metadata.namespace)
else:
# remember replica name to check if operator does a switchover
switchover_target = pod.metadata.name
# do not wait until the next sync
k8s.delete_operator_pod()
# operator should now recreate the master pod and do a switchover before
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
# check if the former replica is now the new master
leader = k8s.get_cluster_leader_pod()
self.eventuallyEqual(lambda: leader.metadata.name, switchover_target, "Rolling update flag did not trigger switchover")
# check that the old master has been recreated
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
replica = k8s.get_cluster_replica_pod()
self.assertTrue(replica.metadata.creation_timestamp > old_creation_timestamp, "Old master pod was not recreated")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_rolling_update_label_timeout(self):
'''
Simulate the case where the replica does not receive its label in time and the rolling update does not finish
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
flag = "zalando-postgres-operator-rolling-update-required"
# verify we are in good state from potential previous tests
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
# get node and replica (expected target of new master)
_, replica_nodes = k8s.get_pg_nodes(cluster_label)
# rolling update annotation
rolling_update_patch = {
"metadata": {
"annotations": {
flag: "true",
}
}
}
# make pod_label_wait_timeout so short that rolling update fails on first try
# temporarily lower resync interval to reduce waiting for further tests
# pods should get healthy in the meantime
patch_resync_config = {
"data": {
"pod_label_wait_timeout": "2s",
"resync_period": "30s",
}
}
try:
# patch both pods for rolling update
podList = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_label)
for pod in podList.items:
k8s.patch_pod(rolling_update_patch, pod.metadata.name, pod.metadata.namespace)
if pod.metadata.labels.get('spilo-role') == 'replica':
switchover_target = pod.metadata.name
# update config and restart operator
k8s.update_config(patch_resync_config, "update resync interval and pod_label_wait_timeout")
# operator should now recreate the replica pod first and do a switchover after
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
# pod_label_wait_timeout should have been exceeded hence the rolling update is continued on next sync
# check if the cluster state is "SyncFailed"
self.eventuallyEqual(lambda: k8s.pg_get_status(), "SyncFailed", "Expected SYNC event to fail")
# wait for next sync, replica should be running normally by now and be ready for switchover
k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
# check if the former replica is now the new master
leader = k8s.get_cluster_leader_pod()
self.eventuallyEqual(lambda: leader.metadata.name, switchover_target, "Rolling update flag did not trigger switchover")
# wait for the old master to get restarted
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
# status should again be "SyncFailed" but turn into "Running" on the next sync
time.sleep(30)
self.eventuallyEqual(lambda: k8s.pg_get_status(), "Running", "Expected running cluster after two syncs")
# revert config changes
patch_resync_config = {
"data": {
"pod_label_wait_timeout": "10m",
"resync_period": "30m",
}
}
k8s.update_config(patch_resync_config, "revert resync interval and pod_label_wait_timeout")
except timeout_decorator.TimeoutError:
print('Operator log: {}'.format(k8s.get_operator_log()))
raise
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_scaling(self):
'''
Scale up from 2 to 3 and back to 2 pods by updating the Postgres manifest at runtime.
'''
k8s = self.k8s
pod = "acid-minimal-cluster-0"
k8s.scale_cluster(3)
self.eventuallyEqual(lambda: k8s.count_running_pods(), 3, "Scale up to 3 failed")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 3, "Not all 3 nodes healthy")
k8s.scale_cluster(2)
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "Scale down to 2 failed")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members(pod)), 2, "Not all 2 members healthy")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_service_annotations(self):
'''
Create a Postgres cluster with service annotations and check them.
'''
k8s = self.k8s
patch_custom_service_annotations = {
"data": {
"custom_service_annotations": "foo:bar",
}
}
k8s.update_config(patch_custom_service_annotations)
pg_patch_custom_annotations = {
"spec": {
"serviceAnnotations": {
"annotation.key": "value",
"alice": "bob",
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_patch_custom_annotations)
annotations = {
"annotation.key": "value",
"foo": "bar",
"alice": "bob"
}
self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=master", annotations), "Wrong annotations")
self.eventuallyTrue(lambda: k8s.check_service_annotations("cluster-name=acid-minimal-cluster,spilo-role=replica", annotations), "Wrong annotations")
# clean up
unpatch_custom_service_annotations = {
"data": {
"custom_service_annotations": "",
}
}
k8s.update_config(unpatch_custom_service_annotations)
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_statefulset_annotation_propagation(self):
'''
Inject annotations into the Postgresql CRD and check their propagation to the stateful set
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
patch_sset_propagate_annotations = {
"data": {
"downscaler_annotations": "deployment-time,downscaler/*",
"inherited_annotations": "owned-by",
}
}
k8s.update_config(patch_sset_propagate_annotations)
pg_crd_annotations = {
"metadata": {
"annotations": {
"deployment-time": "2020-04-30 12:00:00",
"downscaler/downtime_replicas": "0",
"owned-by": "acid",
},
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_crd_annotations)
annotations = {
"deployment-time": "2020-04-30 12:00:00",
"downscaler/downtime_replicas": "0",
"owned-by": "acid",
}
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyTrue(lambda: k8s.check_statefulset_annotations(cluster_label, annotations), "Annotations missing")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
@unittest.skip("Skipping this test until fixed")
def test_taint_based_eviction(self):
'''
Add taint "postgres=:NoExecute" to node with master. This must cause a failover.
'''
k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
# verify we are in good state from potential previous tests
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
# get nodes of master and replica(s) (expected target of new master)
master_nodes, replica_nodes = k8s.get_cluster_nodes()
self.assertNotEqual(master_nodes, [])
self.assertNotEqual(replica_nodes, [])
# taint node with postgres=:NoExecute to force failover
body = {
"spec": {
"taints": [
{
"effect": "NoExecute",
"key": "postgres"
}
]
}
}
k8s.api.core_v1.patch_node(master_nodes[0], body)
self.eventuallyTrue(lambda: k8s.get_cluster_nodes()[0], replica_nodes)
self.assertNotEqual(lambda: k8s.get_cluster_nodes()[0], master_nodes)
# add toleration to pods
patch_toleration_config = {
"data": {
"toleration": "key:postgres,operator:Exists,effect:NoExecute"
}
}
k8s.update_config(patch_toleration_config, step="allow tainted nodes")
self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
self.eventuallyEqual(lambda: len(k8s.get_patroni_running_members("acid-minimal-cluster-0")), 2, "Postgres status did not enter running")
# toggle pod anti affinity to move replica away from master node
nm, new_replica_nodes = k8s.get_cluster_nodes()
new_master_node = nm[0]
self.assert_distributed_pods(new_master_node, new_replica_nodes, cluster_label)
@timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_zz_cluster_deletion(self):
''' '''
Test deletion with configured protection Test deletion with configured protection
''' '''

View File

@ -9,7 +9,7 @@ metadata:
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
spec: spec:
dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p7 dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
teamId: "acid" teamId: "acid"
numberOfInstances: 2 numberOfInstances: 2
users: # Application/Robot users users: # Application/Robot users
@ -46,6 +46,12 @@ spec:
# storageClass: my-sc # storageClass: my-sc
# iops: 1000 # for EBS gp3 # iops: 1000 # for EBS gp3
# throughput: 250 # in MB/s for EBS gp3 # throughput: 250 # in MB/s for EBS gp3
# selector:
# matchExpressions:
# - { key: flavour, operator: In, values: [ "banana", "chocolate" ] }
# matchLabels:
# environment: dev
# service: postgres
additionalVolumes: additionalVolumes:
- name: empty - name: empty
mountPath: /opt/empty mountPath: /opt/empty

View File

@ -16,7 +16,7 @@ data:
# connection_pooler_default_cpu_request: "500m" # connection_pooler_default_cpu_request: "500m"
# connection_pooler_default_memory_limit: 100Mi # connection_pooler_default_memory_limit: 100Mi
# connection_pooler_default_memory_request: 100Mi # connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16" connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
# connection_pooler_max_db_connections: 60 # connection_pooler_max_db_connections: 60
# connection_pooler_mode: "transaction" # connection_pooler_mode: "transaction"
# connection_pooler_number_of_instances: 2 # connection_pooler_number_of_instances: 2
@ -32,10 +32,11 @@ data:
# default_memory_request: 100Mi # default_memory_request: 100Mi
# delete_annotation_date_key: delete-date # delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername # delete_annotation_name_key: delete-clustername
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7 docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
# downscaler_annotations: "deployment-time,downscaler/*" # downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true" # enable_admin_role_for_users: "true"
# enable_crd_validation: "true" # enable_crd_validation: "true"
# enable_cross_namespace_secret: "false"
# enable_database_access: "true" # enable_database_access: "true"
enable_ebs_gp3_migration: "false" enable_ebs_gp3_migration: "false"
# enable_ebs_gp3_migration_max_size: "1000" # enable_ebs_gp3_migration_max_size: "1000"
@ -64,7 +65,7 @@ data:
# inherited_labels: application,environment # inherited_labels: application,environment
# kube_iam_role: "" # kube_iam_role: ""
# log_s3_bucket: "" # log_s3_bucket: ""
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3" logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
# logical_backup_google_application_credentials: "" # logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-" logical_backup_job_prefix: "logical-backup-"
logical_backup_provider: "s3" logical_backup_provider: "s3"
@ -128,6 +129,7 @@ data:
# team_api_role_configuration: "log_statement:all" # team_api_role_configuration: "log_statement:all"
# teams_api_url: http://fake-teams-api.default.svc.cluster.local # teams_api_url: http://fake-teams-api.default.svc.cluster.local
# toleration: "" # toleration: ""
# wal_az_storage_account: ""
# wal_gs_bucket: "" # wal_gs_bucket: ""
# wal_s3_bucket: "" # wal_s3_bucket: ""
watched_namespace: "*" # listen to all namespaces watched_namespace: "*" # listen to all namespaces

View File

@ -23,7 +23,7 @@ spec:
serviceAccountName: postgres-operator serviceAccountName: postgres-operator
containers: containers:
- name: postgres-operator - name: postgres-operator
image: registry.opensource.zalan.do/acid/pgbouncer:master-16 image: registry.opensource.zalan.do/acid/pgbouncer:master-18
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:

View File

@ -61,7 +61,7 @@ spec:
properties: properties:
docker_image: docker_image:
type: string type: string
default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p7" default: "registry.opensource.zalan.do/acid/spilo-13:2.1-p1"
enable_crd_validation: enable_crd_validation:
type: boolean type: boolean
default: true default: true
@ -384,6 +384,8 @@ spec:
type: string type: string
log_s3_bucket: log_s3_bucket:
type: string type: string
wal_az_storage_account:
type: string
wal_gs_bucket: wal_gs_bucket:
type: string type: string
wal_s3_bucket: wal_s3_bucket:
@ -393,7 +395,7 @@ spec:
properties: properties:
logical_backup_docker_image: logical_backup_docker_image:
type: string type: string
default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3" default: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
logical_backup_google_application_credentials: logical_backup_google_application_credentials:
type: string type: string
logical_backup_job_prefix: logical_backup_job_prefix:
@ -528,7 +530,7 @@ spec:
default: "pooler" default: "pooler"
connection_pooler_image: connection_pooler_image:
type: string type: string
default: "registry.opensource.zalan.do/acid/pgbouncer:master-16" default: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
connection_pooler_max_db_connections: connection_pooler_max_db_connections:
type: integer type: integer
default: 60 default: 60

View File

@ -19,7 +19,7 @@ spec:
serviceAccountName: postgres-operator serviceAccountName: postgres-operator
containers: containers:
- name: postgres-operator - name: postgres-operator
image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.3 image: registry.opensource.zalan.do/acid/postgres-operator:v1.7.0
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:

View File

@ -3,7 +3,7 @@ kind: OperatorConfiguration
metadata: metadata:
name: postgresql-operator-default-configuration name: postgresql-operator-default-configuration
configuration: configuration:
docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7 docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
# enable_crd_validation: true # enable_crd_validation: true
# enable_lazy_spilo_upgrade: false # enable_lazy_spilo_upgrade: false
enable_pgversion_env_var: true enable_pgversion_env_var: true
@ -45,6 +45,7 @@ configuration:
# downscaler_annotations: # downscaler_annotations:
# - deployment-time # - deployment-time
# - downscaler/* # - downscaler/*
# enable_cross_namespace_secret: "false"
enable_init_containers: true enable_init_containers: true
enable_pod_antiaffinity: false enable_pod_antiaffinity: false
enable_pod_disruption_budget: true enable_pod_disruption_budget: true
@ -120,10 +121,11 @@ configuration:
# gcp_credentials: "" # gcp_credentials: ""
# kube_iam_role: "" # kube_iam_role: ""
# log_s3_bucket: "" # log_s3_bucket: ""
# wal_az_storage_account: ""
# wal_gs_bucket: "" # wal_gs_bucket: ""
# wal_s3_bucket: "" # wal_s3_bucket: ""
logical_backup: logical_backup:
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3" logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
# logical_backup_google_application_credentials: "" # logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-" logical_backup_job_prefix: "logical-backup-"
logical_backup_provider: "s3" logical_backup_provider: "s3"
@ -164,7 +166,7 @@ configuration:
connection_pooler_default_cpu_request: "500m" connection_pooler_default_cpu_request: "500m"
connection_pooler_default_memory_limit: 100Mi connection_pooler_default_memory_limit: 100Mi
connection_pooler_default_memory_request: 100Mi connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16" connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
# connection_pooler_max_db_connections: 60 # connection_pooler_max_db_connections: 60
connection_pooler_mode: "transaction" connection_pooler_mode: "transaction"
connection_pooler_number_of_instances: 2 connection_pooler_number_of_instances: 2

View File

@ -219,6 +219,97 @@ spec:
items: items:
type: string type: string
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$' pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
numberOfInstances: numberOfInstances:
type: integer type: integer
minimum: 0 minimum: 0
@ -299,6 +390,8 @@ spec:
type: boolean type: boolean
defaultRoles: defaultRoles:
type: boolean type: boolean
secretNamespace:
type: string
replicaLoadBalancer: # deprecated replicaLoadBalancer: # deprecated
type: boolean type: boolean
resources: resources:
@ -392,97 +485,6 @@ spec:
type: string type: string
caSecretName: caSecretName:
type: string type: string
nodeAffinity:
type: object
properties:
preferredDuringSchedulingIgnoredDuringExecution:
type: array
items:
type: object
required:
- weight
- preference
properties:
preference:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
weight:
format: int32
type: integer
requiredDuringSchedulingIgnoredDuringExecution:
type: object
required:
- nodeSelectorTerms
properties:
nodeSelectorTerms:
type: array
items:
type: object
properties:
matchExpressions:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchFields:
type: array
items:
type: object
required:
- key
- operator
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
tolerations: tolerations:
type: array type: array
items: items:
@ -555,6 +557,24 @@ spec:
properties: properties:
iops: iops:
type: integer type: integer
selector:
type: object
properties:
matchExpressions:
type: array
items:
type: object
properties:
key:
type: string
operator:
type: string
values:
type: array
items:
type: string
matchLabels:
type: object
size: size:
type: string type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'

View File

@ -341,6 +341,91 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"nodeAffinity": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"preferredDuringSchedulingIgnoredDuringExecution": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"preference", "weight"},
Properties: map[string]apiextv1.JSONSchemaProps{
"preference": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
"weight": {
Type: "integer",
Format: "int32",
},
},
},
},
},
"requiredDuringSchedulingIgnoredDuringExecution": {
Type: "object",
Required: []string{"nodeSelectorTerms"},
Properties: map[string]apiextv1.JSONSchemaProps{
"nodeSelectorTerms": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
},
},
},
},
},
},
"numberOfInstances": { "numberOfInstances": {
Type: "integer", Type: "integer",
Minimum: &min0, Minimum: &min0,
@ -488,6 +573,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"secretNamespace": {
Type: "string",
},
}, },
}, },
}, },
@ -596,91 +684,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"nodeAffinity": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"preferredDuringSchedulingIgnoredDuringExecution": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"preference", "weight"},
Properties: map[string]apiextv1.JSONSchemaProps{
"preference": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
"weight": {
Type: "integer",
Format: "int32",
},
},
},
},
},
"requiredDuringSchedulingIgnoredDuringExecution": {
Type: "object",
Required: []string{"nodeSelectorTerms"},
Properties: map[string]apiextv1.JSONSchemaProps{
"nodeSelectorTerms": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
"matchFields": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Allows: true,
},
},
},
},
},
},
},
},
},
},
},
},
"tolerations": { "tolerations": {
Type: "array", Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{ Items: &apiextv1.JSONSchemaPropsOrArray{
@ -838,6 +841,54 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"iops": { "iops": {
Type: "integer", Type: "integer",
}, },
"selector": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"matchExpressions": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "object",
Required: []string{"key", "operator", "values"},
Properties: map[string]apiextv1.JSONSchemaProps{
"key": {
Type: "string",
},
"operator": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"In"`),
},
{
Raw: []byte(`"NotIn"`),
},
{
Raw: []byte(`"Exists"`),
},
{
Raw: []byte(`"DoesNotExist"`),
},
},
},
"values": {
Type: "array",
Items: &apiextv1.JSONSchemaPropsOrArray{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
},
},
},
},
"matchLabels": {
Type: "object",
XPreserveUnknownFields: util.True(),
},
},
},
"size": { "size": {
Type: "string", Type: "string",
Description: "Value must not be zero", Description: "Value must not be zero",
@ -1026,6 +1077,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
}, },
}, },
"enable_cross_namespace_secret": {
Type: "boolean",
},
"enable_init_containers": { "enable_init_containers": {
Type: "boolean", Type: "boolean",
}, },

View File

@ -81,7 +81,7 @@ func (ps *PostgresStatus) UnmarshalJSON(data []byte) error {
if err != nil { if err != nil {
metaErr := json.Unmarshal(data, &status) metaErr := json.Unmarshal(data, &status)
if metaErr != nil { if metaErr != nil {
return fmt.Errorf("Could not parse status: %v; err %v", string(data), metaErr) return fmt.Errorf("could not parse status: %v; err %v", string(data), metaErr)
} }
tmp.PostgresClusterStatus = status tmp.PostgresClusterStatus = status
} }

View File

@ -91,6 +91,7 @@ type KubernetesMetaConfiguration struct {
EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"` EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"` PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
PodManagementPolicy string `json:"pod_management_policy,omitempty"` PodManagementPolicy string `json:"pod_management_policy,omitempty"`
EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
} }
// PostgresPodResourcesDefaults defines the spec of default resources // PostgresPodResourcesDefaults defines the spec of default resources
@ -131,6 +132,7 @@ type AWSGCPConfiguration struct {
AWSRegion string `json:"aws_region,omitempty"` AWSRegion string `json:"aws_region,omitempty"`
WALGSBucket string `json:"wal_gs_bucket,omitempty"` WALGSBucket string `json:"wal_gs_bucket,omitempty"`
GCPCredentials string `json:"gcp_credentials,omitempty"` GCPCredentials string `json:"gcp_credentials,omitempty"`
WALAZStorageAccount string `json:"wal_az_storage_account,omitempty"`
LogS3Bucket string `json:"log_s3_bucket,omitempty"` LogS3Bucket string `json:"log_s3_bucket,omitempty"`
KubeIAMRole string `json:"kube_iam_role,omitempty"` KubeIAMRole string `json:"kube_iam_role,omitempty"`
AdditionalSecretMount string `json:"additional_secret_mount,omitempty"` AdditionalSecretMount string `json:"additional_secret_mount,omitempty"`

View File

@ -95,6 +95,7 @@ type PreparedDatabase struct {
PreparedSchemas map[string]PreparedSchema `json:"schemas,omitempty"` PreparedSchemas map[string]PreparedSchema `json:"schemas,omitempty"`
DefaultUsers bool `json:"defaultUsers,omitempty" defaults:"false"` DefaultUsers bool `json:"defaultUsers,omitempty" defaults:"false"`
Extensions map[string]string `json:"extensions,omitempty"` Extensions map[string]string `json:"extensions,omitempty"`
SecretNamespace string `json:"secretNamespace,omitempty"`
} }
// PreparedSchema describes elements to be bootstrapped per schema // PreparedSchema describes elements to be bootstrapped per schema
@ -113,6 +114,7 @@ type MaintenanceWindow struct {
// Volume describes a single volume in the manifest. // Volume describes a single volume in the manifest.
type Volume struct { type Volume struct {
Selector *metav1.LabelSelector `json:"selector,omitempty"`
Size string `json:"size"` Size string `json:"size"`
StorageClass string `json:"storageClass,omitempty"` StorageClass string `json:"storageClass,omitempty"`
SubPath string `json:"subPath,omitempty"` SubPath string `json:"subPath,omitempty"`

View File

@ -29,6 +29,7 @@ package v1
import ( import (
config "github.com/zalando/postgres-operator/pkg/util/config" config "github.com/zalando/postgres-operator/pkg/util/config"
corev1 "k8s.io/api/core/v1" corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime" runtime "k8s.io/apimachinery/pkg/runtime"
) )
@ -314,22 +315,6 @@ func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow {
return out return out
} }
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MajorVersionUpgradeConfiguration) DeepCopyInto(out *MajorVersionUpgradeConfiguration) {
*out = *in
return
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MajorVersionUpgradeConfiguration.
func (in *MajorVersionUpgradeConfiguration) DeepCopy() *MajorVersionUpgradeConfiguration {
if in == nil {
return nil
}
out := new(MajorVersionUpgradeConfiguration)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) { func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) {
*out = *in *out = *in
@ -385,7 +370,6 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
} }
} }
out.PostgresUsersConfiguration = in.PostgresUsersConfiguration out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
out.MajorVersionUpgrade = in.MajorVersionUpgrade
in.Kubernetes.DeepCopyInto(&out.Kubernetes) in.Kubernetes.DeepCopyInto(&out.Kubernetes)
out.PostgresPodResources = in.PostgresPodResources out.PostgresPodResources = in.PostgresPodResources
out.Timeouts = in.Timeouts out.Timeouts = in.Timeouts
@ -1197,6 +1181,11 @@ func (in UserFlags) DeepCopy() UserFlags {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *Volume) DeepCopyInto(out *Volume) { func (in *Volume) DeepCopyInto(out *Volume) {
*out = *in *out = *in
if in.Selector != nil {
in, out := &in.Selector, &out.Selector
*out = new(metav1.LabelSelector)
(*in).DeepCopyInto(*out)
}
if in.Iops != nil { if in.Iops != nil {
in, out := &in.Iops, &out.Iops in, out := &in.Iops, &out.Iops
*out = new(int64) *out = new(int64)

View File

@ -5,6 +5,7 @@ package cluster
import ( import (
"context" "context"
"database/sql" "database/sql"
"encoding/json"
"fmt" "fmt"
"reflect" "reflect"
"regexp" "regexp"
@ -519,7 +520,7 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
newCheck("new statefulset %s's %s (index %d) resources do not match the current ones", newCheck("new statefulset %s's %s (index %d) resources do not match the current ones",
func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }), func(a, b v1.Container) bool { return !compareResources(&a.Resources, &b.Resources) }),
newCheck("new statefulset %s's %s (index %d) environment does not match the current one", newCheck("new statefulset %s's %s (index %d) environment does not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.Env, b.Env) }), func(a, b v1.Container) bool { return !compareEnv(a.Env, b.Env) }),
newCheck("new statefulset %s's %s (index %d) environment sources do not match the current one", newCheck("new statefulset %s's %s (index %d) environment sources do not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }), func(a, b v1.Container) bool { return !reflect.DeepEqual(a.EnvFrom, b.EnvFrom) }),
newCheck("new statefulset %s's %s (index %d) security context does not match the current one", newCheck("new statefulset %s's %s (index %d) security context does not match the current one",
@ -576,6 +577,56 @@ func compareResourcesAssumeFirstNotNil(a *v1.ResourceRequirements, b *v1.Resourc
} }
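// compareEnv reports whether two env var lists match semantically: ordering is
// ignored, SPILO_CONFIGURATION values are compared via compareSpiloConfiguration,
// and other variables by Value (or by ValueFrom when both values are empty).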
func compareEnv(a, b []v1.EnvVar) bool {
if len(a) != len(b) {
return false
}
equal := true
for _, enva := range a {
hasmatch := false
for _, envb := range b {
if enva.Name == envb.Name {
hasmatch = true
if enva.Name == "SPILO_CONFIGURATION" {
equal = compareSpiloConfiguration(enva.Value, envb.Value)
} else {
if enva.Value == "" && envb.Value == "" {
equal = reflect.DeepEqual(enva.ValueFrom, envb.ValueFrom)
} else {
equal = (enva.Value == envb.Value)
}
}
if !equal {
return false
}
}
}
if !hasmatch {
return false
}
}
return true
}
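// compareSpiloConfiguration treats two SPILO_CONFIGURATION JSON strings as equal
// when they match after the bootstrap DCS section is zeroed out; input that fails
// to unmarshal counts as a difference.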
func compareSpiloConfiguration(configa, configb string) bool {
var (
oa, ob spiloConfiguration
)
var err error
err = json.Unmarshal([]byte(configa), &oa)
if err != nil {
return false
}
oa.Bootstrap.DCS = patroniDCS{}
err = json.Unmarshal([]byte(configb), &ob)
if err != nil {
return false
}
ob.Bootstrap.DCS = patroniDCS{}
return reflect.DeepEqual(oa, ob)
}
func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error { func (c *Cluster) enforceMinResourceLimits(spec *acidv1.PostgresSpec) error {
var ( var (
@ -942,20 +993,22 @@ func (c *Cluster) initSystemUsers() {
c.systemUsers[constants.SuperuserKeyName] = spec.PgUser{ c.systemUsers[constants.SuperuserKeyName] = spec.PgUser{
Origin: spec.RoleOriginSystem, Origin: spec.RoleOriginSystem,
Name: c.OpConfig.SuperUsername, Name: c.OpConfig.SuperUsername,
Namespace: c.Namespace,
Password: util.RandomPassword(constants.PasswordLength), Password: util.RandomPassword(constants.PasswordLength),
} }
c.systemUsers[constants.ReplicationUserKeyName] = spec.PgUser{ c.systemUsers[constants.ReplicationUserKeyName] = spec.PgUser{
Origin: spec.RoleOriginSystem, Origin: spec.RoleOriginSystem,
Name: c.OpConfig.ReplicationUsername, Name: c.OpConfig.ReplicationUsername,
Namespace: c.Namespace,
Password: util.RandomPassword(constants.PasswordLength), Password: util.RandomPassword(constants.PasswordLength),
} }
// Connection pooler user is an exception, if requested it's going to be // Connection pooler user is an exception, if requested it's going to be
// created by operator as a normal pgUser // created by operator as a normal pgUser
if needConnectionPooler(&c.Spec) { if needConnectionPooler(&c.Spec) {
// initialize empty connection pooler if not done yet connectionPoolerSpec := c.Spec.ConnectionPooler
if c.Spec.ConnectionPooler == nil { if connectionPoolerSpec == nil {
c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{} connectionPoolerSpec = &acidv1.ConnectionPooler{}
} }
// Using superuser as pooler user is not a good idea. First of all it's // Using superuser as pooler user is not a good idea. First of all it's
@ -963,13 +1016,13 @@ func (c *Cluster) initSystemUsers() {
// and second it's a bad practice. // and second it's a bad practice.
username := c.OpConfig.ConnectionPooler.User username := c.OpConfig.ConnectionPooler.User
isSuperUser := c.Spec.ConnectionPooler.User == c.OpConfig.SuperUsername isSuperUser := connectionPoolerSpec.User == c.OpConfig.SuperUsername
isProtectedUser := c.shouldAvoidProtectedOrSystemRole( isProtectedUser := c.shouldAvoidProtectedOrSystemRole(
c.Spec.ConnectionPooler.User, "connection pool role") connectionPoolerSpec.User, "connection pool role")
if !isSuperUser && !isProtectedUser { if !isSuperUser && !isProtectedUser {
username = util.Coalesce( username = util.Coalesce(
c.Spec.ConnectionPooler.User, connectionPoolerSpec.User,
c.OpConfig.ConnectionPooler.User) c.OpConfig.ConnectionPooler.User)
} }
@ -977,6 +1030,7 @@ func (c *Cluster) initSystemUsers() {
connectionPoolerUser := spec.PgUser{ connectionPoolerUser := spec.PgUser{
Origin: spec.RoleConnectionPooler, Origin: spec.RoleConnectionPooler,
Name: username, Name: username,
Namespace: c.Namespace,
Flags: []string{constants.RoleFlagLogin}, Flags: []string{constants.RoleFlagLogin},
Password: util.RandomPassword(constants.PasswordLength), Password: util.RandomPassword(constants.PasswordLength),
} }
@ -1023,11 +1077,11 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
} }
// default roles per database // default roles per database
if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String()); err != nil { if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
} }
if preparedDB.DefaultUsers { if preparedDB.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String()); err != nil { if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
} }
} }
@ -1038,14 +1092,14 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
if err := c.initDefaultRoles(defaultRoles, if err := c.initDefaultRoles(defaultRoles,
preparedDbName+constants.OwnerRoleNameSuffix, preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName, preparedDbName+"_"+preparedSchemaName,
constants.DefaultSearchPath+", "+preparedSchemaName); err != nil { constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err) return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
} }
if preparedSchema.DefaultUsers { if preparedSchema.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers, if err := c.initDefaultRoles(defaultUsers,
preparedDbName+constants.OwnerRoleNameSuffix, preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName, preparedDbName+"_"+preparedSchemaName,
constants.DefaultSearchPath+", "+preparedSchemaName); err != nil { constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err) return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
} }
} }
@ -1055,10 +1109,19 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
return nil return nil
} }
func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string, searchPath string) error { func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix, searchPath, secretNamespace string) error {
for defaultRole, inherits := range defaultRoles { for defaultRole, inherits := range defaultRoles {
namespace := c.Namespace
//if namespaced secrets are allowed
if secretNamespace != "" {
if c.Config.OpConfig.EnableCrossNamespaceSecret {
namespace = secretNamespace
} else {
c.logger.Warn("secretNamespace ignored because enable_cross_namespace_secret set to false. Creating secrets in cluster namespace.")
}
}
roleName := prefix + defaultRole roleName := prefix + defaultRole
flags := []string{constants.RoleFlagNoLogin} flags := []string{constants.RoleFlagNoLogin}
@ -1081,6 +1144,7 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
newRole := spec.PgUser{ newRole := spec.PgUser{
Origin: spec.RoleOriginBootstrap, Origin: spec.RoleOriginBootstrap,
Name: roleName, Name: roleName,
Namespace: namespace,
Password: util.RandomPassword(constants.PasswordLength), Password: util.RandomPassword(constants.PasswordLength),
Flags: flags, Flags: flags,
MemberOf: memberOf, MemberOf: memberOf,
@ -1105,6 +1169,16 @@ func (c *Cluster) initRobotUsers() error {
if c.shouldAvoidProtectedOrSystemRole(username, "manifest robot role") { if c.shouldAvoidProtectedOrSystemRole(username, "manifest robot role") {
continue continue
} }
namespace := c.Namespace
//if namespaced secrets are allowed
if c.Config.OpConfig.EnableCrossNamespaceSecret {
if strings.Contains(username, ".") {
splits := strings.Split(username, ".")
namespace = splits[0]
}
}
flags, err := normalizeUserFlags(userFlags) flags, err := normalizeUserFlags(userFlags)
if err != nil { if err != nil {
return fmt.Errorf("invalid flags for user %q: %v", username, err) return fmt.Errorf("invalid flags for user %q: %v", username, err)
@ -1116,6 +1190,7 @@ func (c *Cluster) initRobotUsers() error {
newRole := spec.PgUser{ newRole := spec.PgUser{
Origin: spec.RoleOriginManifest, Origin: spec.RoleOriginManifest,
Name: username, Name: username,
Namespace: namespace,
Password: util.RandomPassword(constants.PasswordLength), Password: util.RandomPassword(constants.PasswordLength),
Flags: flags, Flags: flags,
AdminRole: adminRole, AdminRole: adminRole,
@ -1233,6 +1308,7 @@ func (c *Cluster) initInfrastructureRoles() error {
return fmt.Errorf("invalid flags for user '%v': %v", username, err) return fmt.Errorf("invalid flags for user '%v': %v", username, err)
} }
newRole.Flags = flags newRole.Flags = flags
newRole.Namespace = c.Namespace
if currentRole, present := c.pgUsers[username]; present { if currentRole, present := c.pgUsers[username]; present {
c.pgUsers[username] = c.resolveNameConflict(&currentRole, &newRole) c.pgUsers[username] = c.resolveNameConflict(&currentRole, &newRole)

View File

@ -7,12 +7,15 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util/config" "github.com/zalando/postgres-operator/pkg/util/config"
"github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/k8sutil"
"github.com/zalando/postgres-operator/pkg/util/teams" "github.com/zalando/postgres-operator/pkg/util/teams"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes/fake"
"k8s.io/client-go/tools/record" "k8s.io/client-go/tools/record"
) )
@ -79,8 +82,8 @@ func TestInitRobotUsers(t *testing.T) {
}{ }{
{ {
manifestUsers: map[string]acidv1.UserFlags{"foo": {"superuser", "createdb"}}, manifestUsers: map[string]acidv1.UserFlags{"foo": {"superuser", "createdb"}},
infraRoles: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Password: "bar"}}, infraRoles: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Namespace: cl.Namespace, Password: "bar"}},
result: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Password: "bar"}}, result: map[string]spec.PgUser{"foo": {Origin: spec.RoleOriginInfrastructure, Name: "foo", Namespace: cl.Namespace, Password: "bar"}},
err: nil, err: nil,
}, },
{ {
@ -845,3 +848,243 @@ func TestPreparedDatabases(t *testing.T) {
} }
} }
} }
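// TestCompareSpiloConfiguration compares each case against the first (reference)
// config: differences confined to the DCS section must be tolerated, while other
// changes and invalid JSON must not.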
func TestCompareSpiloConfiguration(t *testing.T) {
testCases := []struct {
Config string
ExpectedResult bool
}{
{
`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
true,
},
{
`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
true,
},
{
`{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"200","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
false,
},
{
`{}`,
false,
},
{
`invalidjson`,
false,
},
}
refCase := testCases[0]
for _, testCase := range testCases {
if result := compareSpiloConfiguration(refCase.Config, testCase.Config); result != testCase.ExpectedResult {
t.Errorf("expected %v got %v", testCase.ExpectedResult, result)
}
}
}
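// TestCompareEnv compares each env list against the first (reference) one:
// DCS-only differences in SPILO_CONFIGURATION are tolerated, while changed,
// added or missing variables are not.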
func TestCompareEnv(t *testing.T) {
testCases := []struct {
Envs []v1.EnvVar
ExpectedResult bool
}{
{
Envs: []v1.EnvVar{
{
Name: "VARIABLE1",
Value: "value1",
},
{
Name: "VARIABLE2",
Value: "value2",
},
{
Name: "VARIABLE3",
Value: "value3",
},
{
Name: "SPILO_CONFIGURATION",
Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
},
},
ExpectedResult: true,
},
{
Envs: []v1.EnvVar{
{
Name: "VARIABLE1",
Value: "value1",
},
{
Name: "VARIABLE2",
Value: "value2",
},
{
Name: "VARIABLE3",
Value: "value3",
},
{
Name: "SPILO_CONFIGURATION",
Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
},
},
ExpectedResult: true,
},
{
Envs: []v1.EnvVar{
{
Name: "VARIABLE4",
Value: "value4",
},
{
Name: "VARIABLE2",
Value: "value2",
},
{
Name: "VARIABLE3",
Value: "value3",
},
{
Name: "SPILO_CONFIGURATION",
Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
},
},
ExpectedResult: false,
},
{
Envs: []v1.EnvVar{
{
Name: "VARIABLE1",
Value: "value1",
},
{
Name: "VARIABLE2",
Value: "value2",
},
{
Name: "VARIABLE3",
Value: "value3",
},
{
Name: "VARIABLE4",
Value: "value4",
},
{
Name: "SPILO_CONFIGURATION",
Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
},
},
ExpectedResult: false,
},
{
Envs: []v1.EnvVar{
{
Name: "VARIABLE1",
Value: "value1",
},
{
Name: "VARIABLE2",
Value: "value2",
},
{
Name: "SPILO_CONFIGURATION",
Value: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/12/bin","parameters":{"autovacuum_analyze_scale_factor":"0.1"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"users":{"test":{"password":"","options":["CREATEDB","NOLOGIN"]}},"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"postgresql":{"parameters":{"max_connections":"100","max_locks_per_transaction":"64","max_worker_processes":"4"}}}}}`,
},
},
ExpectedResult: false,
},
}
refCase := testCases[0]
for _, testCase := range testCases {
if result := compareEnv(refCase.Envs, testCase.Envs); result != testCase.ExpectedResult {
t.Errorf("expected %v got %v", testCase.ExpectedResult, result)
}
}
}
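Correspondingly, the cases above suggest an environment comparison that requires the same number of variables, matches every variable by name, compares SPILO_CONFIGURATION structurally and everything else verbatim. A hedged sketch along those lines, reusing the comparison sketch above and the v1 (k8s.io/api/core/v1) import already used by these tests; it is not the operator's actual compareEnv:

// compareEnvSketch is an illustrative assumption derived from the test cases.
func compareEnvSketch(desired, actual []v1.EnvVar) bool {
	if len(desired) != len(actual) {
		return false
	}
	values := make(map[string]string, len(actual))
	for _, env := range actual {
		values[env.Name] = env.Value
	}
	for _, env := range desired {
		value, ok := values[env.Name]
		if !ok {
			return false
		}
		if env.Name == "SPILO_CONFIGURATION" {
			// structural comparison that ignores Patroni-managed dcs drift
			if !compareSpiloConfigurationSketch(env.Value, value) {
				return false
			}
		} else if env.Value != value {
			return false
		}
	}
	return true
}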
func TestCrossNamespacedSecrets(t *testing.T) {
testName := "test secrets in different namespace"
clientSet := fake.NewSimpleClientset()
acidClientSet := fakeacidv1.NewSimpleClientset()
namespace := "default"
client := k8sutil.KubernetesClient{
StatefulSetsGetter: clientSet.AppsV1(),
ServicesGetter: clientSet.CoreV1(),
DeploymentsGetter: clientSet.AppsV1(),
PostgresqlsGetter: acidClientSet.AcidV1(),
SecretsGetter: clientSet.CoreV1(),
}
pg := acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: "acid-fake-cluster",
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Volume: acidv1.Volume{
Size: "1Gi",
},
Users: map[string]acidv1.UserFlags{
"appspace.db_user": {},
"db_user": {},
},
},
}
var cluster = New(
Config{
OpConfig: config.Config{
ConnectionPooler: config.ConnectionPooler{
ConnectionPoolerDefaultCPURequest: "100m",
ConnectionPoolerDefaultCPULimit: "100m",
ConnectionPoolerDefaultMemoryRequest: "100Mi",
ConnectionPoolerDefaultMemoryLimit: "100Mi",
NumberOfInstances: int32ToPointer(1),
},
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
PodRoleLabel: "spilo-role",
},
EnableCrossNamespaceSecret: true,
},
}, client, pg, logger, eventRecorder)
userNamespaceMap := map[string]string{
cluster.Namespace: "db_user",
"appspace": "appspace.db_user",
}
err := cluster.initRobotUsers()
if err != nil {
t.Errorf("Could not create secret for namespaced users with error: %s", err)
}
for _, u := range cluster.pgUsers {
if u.Name != userNamespaceMap[u.Namespace] {
t.Errorf("%s: Could not create namespaced user in its correct namespaces for user %s in namespace %s", testName, u.Name, u.Namespace)
}
}
}
func TestValidUsernames(t *testing.T) {
testName := "test username validity"
invalidUsernames := []string{"_", ".", ".user", "appspace.", "user_", "_user", "-user", "user-", ",", "-", ",user", "user,", "namespace,user"}
validUsernames := []string{"user", "appspace.user", "appspace.dot.user", "user_name", "app_space.user_name"}
for _, username := range invalidUsernames {
if isValidUsername(username) {
t.Errorf("%s Invalid username is allowed: %s", testName, username)
}
}
for _, username := range validUsernames {
if !isValidUsername(username) {
t.Errorf("%s Valid username is not allowed: %s", testName, username)
}
}
}
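The two lists encode a naming rule: a role name is one or more dot-separated segments, each beginning and ending with a lowercase letter or digit, with underscores and hyphens allowed only in between; commas and empty segments are rejected. A hedged validator with exactly these properties, written as an illustration of the rule rather than the operator's real isValidUsername:

package cluster

import "regexp"

// usernameSketchPattern is an assumption derived from the test lists above:
// dot-separated segments, each starting and ending with [a-z0-9], with '_'
// and '-' permitted only between the first and last character of a segment.
var usernameSketchPattern = regexp.MustCompile(`^[a-z0-9](?:[a-z0-9_-]*[a-z0-9])?(?:\.[a-z0-9](?:[a-z0-9_-]*[a-z0-9])?)*$`)

func isValidUsernameSketch(name string) bool {
	return usernameSketchPattern.MatchString(name)
}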

View File

@ -3,6 +3,7 @@ package cluster
import ( import (
"context" "context"
"fmt" "fmt"
"reflect"
"strings" "strings"
"github.com/r3labs/diff" "github.com/r3labs/diff"
@ -60,7 +61,7 @@ func needMasterConnectionPooler(spec *acidv1.PostgresSpec) bool {
} }
func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool { func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) || return (spec.EnableConnectionPooler != nil && *spec.EnableConnectionPooler) ||
(spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil) (spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
} }
@ -114,7 +115,7 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncRe
c.setProcessName("creating connection pooler") c.setProcessName("creating connection pooler")
//this is essentially sync with nil as oldSpec //this is essentially sync with nil as oldSpec
if reason, err := c.syncConnectionPooler(nil, &c.Postgresql, LookupFunction); err != nil { if reason, err := c.syncConnectionPooler(&acidv1.Postgresql{}, &c.Postgresql, LookupFunction); err != nil {
return reason, err return reason, err
} }
return reason, nil return reason, nil
@ -140,11 +141,15 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncRe
// RESERVE_SIZE is how many additional connections to allow for a pooler. // RESERVE_SIZE is how many additional connections to allow for a pooler.
func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar { func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
spec := &c.Spec spec := &c.Spec
connectionPoolerSpec := spec.ConnectionPooler
if connectionPoolerSpec == nil {
connectionPoolerSpec = &acidv1.ConnectionPooler{}
}
effectiveMode := util.Coalesce( effectiveMode := util.Coalesce(
spec.ConnectionPooler.Mode, connectionPoolerSpec.Mode,
c.OpConfig.ConnectionPooler.Mode) c.OpConfig.ConnectionPooler.Mode)
numberOfInstances := spec.ConnectionPooler.NumberOfInstances numberOfInstances := connectionPoolerSpec.NumberOfInstances
if numberOfInstances == nil { if numberOfInstances == nil {
numberOfInstances = util.CoalesceInt32( numberOfInstances = util.CoalesceInt32(
c.OpConfig.ConnectionPooler.NumberOfInstances, c.OpConfig.ConnectionPooler.NumberOfInstances,
@ -152,7 +157,7 @@ func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
} }
effectiveMaxDBConn := util.CoalesceInt32( effectiveMaxDBConn := util.CoalesceInt32(
spec.ConnectionPooler.MaxDBConnections, connectionPoolerSpec.MaxDBConnections,
c.OpConfig.ConnectionPooler.MaxDBConnections) c.OpConfig.ConnectionPooler.MaxDBConnections)
if effectiveMaxDBConn == nil { if effectiveMaxDBConn == nil {
@ -201,17 +206,21 @@ func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) ( func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
*v1.PodTemplateSpec, error) { *v1.PodTemplateSpec, error) {
spec := &c.Spec spec := &c.Spec
connectionPoolerSpec := spec.ConnectionPooler
if connectionPoolerSpec == nil {
connectionPoolerSpec = &acidv1.ConnectionPooler{}
}
gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds()) gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
resources, err := generateResourceRequirements( resources, err := generateResourceRequirements(
spec.ConnectionPooler.Resources, connectionPoolerSpec.Resources,
makeDefaultConnectionPoolerResources(&c.OpConfig)) makeDefaultConnectionPoolerResources(&c.OpConfig))
effectiveDockerImage := util.Coalesce( effectiveDockerImage := util.Coalesce(
spec.ConnectionPooler.DockerImage, connectionPoolerSpec.DockerImage,
c.OpConfig.ConnectionPooler.Image) c.OpConfig.ConnectionPooler.Image)
effectiveSchema := util.Coalesce( effectiveSchema := util.Coalesce(
spec.ConnectionPooler.Schema, connectionPoolerSpec.Schema,
c.OpConfig.ConnectionPooler.Schema) c.OpConfig.ConnectionPooler.Schema)
if err != nil { if err != nil {
@ -220,7 +229,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
secretSelector := func(key string) *v1.SecretKeySelector { secretSelector := func(key string) *v1.SecretKeySelector {
effectiveUser := util.Coalesce( effectiveUser := util.Coalesce(
spec.ConnectionPooler.User, connectionPoolerSpec.User,
c.OpConfig.ConnectionPooler.User) c.OpConfig.ConnectionPooler.User)
return &v1.SecretKeySelector{ return &v1.SecretKeySelector{
@ -285,6 +294,8 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
}, },
} }
tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
podTemplate := &v1.PodTemplateSpec{ podTemplate := &v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Labels: c.connectionPoolerLabels(role, true).MatchLabels, Labels: c.connectionPoolerLabels(role, true).MatchLabels,
@ -294,12 +305,18 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
Spec: v1.PodSpec{ Spec: v1.PodSpec{
TerminationGracePeriodSeconds: &gracePeriod, TerminationGracePeriodSeconds: &gracePeriod,
Containers: []v1.Container{poolerContainer}, Containers: []v1.Container{poolerContainer},
// TODO: add tolerations to scheduler pooler on the same node Tolerations: tolerationsSpec,
// as database
//Tolerations: *tolerationsSpec,
}, },
} }
nodeAffinity := nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
if c.OpConfig.EnablePodAntiAffinity {
labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
podTemplate.Spec.Affinity = generatePodAffinity(labelsSet, c.OpConfig.PodAntiAffinityTopologyKey, nodeAffinity)
} else if nodeAffinity != nil {
podTemplate.Spec.Affinity = nodeAffinity
}
return podTemplate, nil return podTemplate, nil
} }
@ -313,12 +330,13 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
// default values, initialize it to an empty structure. It could be done // default values, initialize it to an empty structure. It could be done
// anywhere, but here is the earliest common entry point between sync and // anywhere, but here is the earliest common entry point between sync and
// create code, so init here. // create code, so init here.
if spec.ConnectionPooler == nil { connectionPoolerSpec := spec.ConnectionPooler
spec.ConnectionPooler = &acidv1.ConnectionPooler{} if connectionPoolerSpec == nil {
connectionPoolerSpec = &acidv1.ConnectionPooler{}
} }
podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role) podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role)
numberOfInstances := spec.ConnectionPooler.NumberOfInstances numberOfInstances := connectionPoolerSpec.NumberOfInstances
if numberOfInstances == nil { if numberOfInstances == nil {
numberOfInstances = util.CoalesceInt32( numberOfInstances = util.CoalesceInt32(
c.OpConfig.ConnectionPooler.NumberOfInstances, c.OpConfig.ConnectionPooler.NumberOfInstances,
@ -363,16 +381,6 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service { func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {
spec := &c.Spec spec := &c.Spec
// there are two ways to enable connection pooler, either to specify a
// connectionPooler section or enableConnectionPooler. In the second case
// spec.connectionPooler will be nil, so to make it easier to calculate
// default values, initialize it to an empty structure. It could be done
// anywhere, but here is the earliest common entry point between sync and
// create code, so init here.
if spec.ConnectionPooler == nil {
spec.ConnectionPooler = &acidv1.ConnectionPooler{}
}
serviceSpec := v1.ServiceSpec{ serviceSpec := v1.ServiceSpec{
Ports: []v1.ServicePort{ Ports: []v1.ServicePort{
{ {
@ -660,12 +668,14 @@ func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resource
func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql) { func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql) {
var v []string var v []string
var input []*bool var input []*bool
newMasterConnectionPoolerEnabled := needMasterConnectionPoolerWorker(&newSpec.Spec)
if oldSpec == nil { if oldSpec == nil {
input = []*bool{nil, nil, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler} input = []*bool{nil, nil, &newMasterConnectionPoolerEnabled, newSpec.Spec.EnableReplicaConnectionPooler}
} else { } else {
input = []*bool{oldSpec.Spec.EnableConnectionPooler, oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler} oldMasterConnectionPoolerEnabled := needMasterConnectionPoolerWorker(&oldSpec.Spec)
input = []*bool{&oldMasterConnectionPoolerEnabled, oldSpec.Spec.EnableReplicaConnectionPooler, &newMasterConnectionPoolerEnabled, newSpec.Spec.EnableReplicaConnectionPooler}
} }
for _, b := range input { for _, b := range input {
@ -676,25 +686,16 @@ func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql)
} }
} }
log.Debugf("syncing connection pooler from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3]) log.Debugf("syncing connection pooler (master, replica) from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3])
} }
func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) { func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) {
var reason SyncReason var reason SyncReason
var err error var err error
var newNeedConnectionPooler, oldNeedConnectionPooler bool var connectionPoolerNeeded bool
oldNeedConnectionPooler = false
if oldSpec == nil { needSync := !reflect.DeepEqual(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler)
oldSpec = &acidv1.Postgresql{
Spec: acidv1.PostgresSpec{
ConnectionPooler: &acidv1.ConnectionPooler{},
},
}
}
needSync, _ := needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger)
masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler) masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler)
if err != nil { if err != nil {
c.logger.Error("Error in getting diff of master connection pooler changes") c.logger.Error("Error in getting diff of master connection pooler changes")
@ -704,15 +705,14 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
c.logger.Error("Error in getting diff of replica connection pooler changes") c.logger.Error("Error in getting diff of replica connection pooler changes")
} }
	// skip pooler sync only // skip pooler sync when there's no diff or it's deactivated
// 1. if there is no diff in spec, AND // but, handling the case when connectionPooler is not there but it is required
// 2. if connection pooler is already there and is also required as per newSpec
//
// Handling the case when connectionPooler is not there but it is required
// as per spec, hence do not skip syncing in that case, even though there // as per spec, hence do not skip syncing in that case, even though there
// is no diff in specs // is no diff in specs
if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) && if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) &&
(c.ConnectionPooler != nil && (needConnectionPooler(&newSpec.Spec))) { ((!needConnectionPooler(&newSpec.Spec) && (c.ConnectionPooler == nil || !needConnectionPooler(&oldSpec.Spec))) ||
(c.ConnectionPooler != nil && needConnectionPooler(&newSpec.Spec) &&
(c.ConnectionPooler[Master].LookupFunction || c.ConnectionPooler[Replica].LookupFunction))) {
c.logger.Debugln("syncing pooler is not required") c.logger.Debugln("syncing pooler is not required")
return nil, nil return nil, nil
} }
@ -723,15 +723,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
for _, role := range [2]PostgresRole{Master, Replica} { for _, role := range [2]PostgresRole{Master, Replica} {
if role == Master { if role == Master {
newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec) connectionPoolerNeeded = needMasterConnectionPoolerWorker(&newSpec.Spec)
if oldSpec != nil {
oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec)
}
} else { } else {
newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec) connectionPoolerNeeded = needReplicaConnectionPoolerWorker(&newSpec.Spec)
if oldSpec != nil {
oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec)
}
} }
// if the call is via createConnectionPooler, then it is required to initialize // if the call is via createConnectionPooler, then it is required to initialize
@ -751,24 +745,22 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
} }
} }
if newNeedConnectionPooler { if connectionPoolerNeeded {
			// Try to sync in any case. If we didn't need connection pooler before, // Try to sync in any case. If we didn't need connection pooler before,
// it means we want to create it. If it was already present, still sync // it means we want to create it. If it was already present, still sync
// since it could happen that there is no difference in specs, and all // since it could happen that there is no difference in specs, and all
// the resources are remembered, but the deployment was manually deleted // the resources are remembered, but the deployment was manually deleted
// in between // in between
// in this case also do not forget to install lookup function as for // in this case also do not forget to install lookup function
// creating cluster if !c.ConnectionPooler[role].LookupFunction {
if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction { connectionPooler := c.Spec.ConnectionPooler
newConnectionPooler := newSpec.Spec.ConnectionPooler
specSchema := "" specSchema := ""
specUser := "" specUser := ""
if newConnectionPooler != nil { if connectionPooler != nil {
specSchema = newConnectionPooler.Schema specSchema = connectionPooler.Schema
specUser = newConnectionPooler.User specUser = connectionPooler.User
} }
schema := util.Coalesce( schema := util.Coalesce(
@ -779,9 +771,10 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
specUser, specUser,
c.OpConfig.ConnectionPooler.User) c.OpConfig.ConnectionPooler.User)
if err = LookupFunction(schema, user, role); err != nil { if err = LookupFunction(schema, user); err != nil {
return NoSync, err return NoSync, err
} }
c.ConnectionPooler[role].LookupFunction = true
} }
if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil { if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil {
@ -800,8 +793,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
} }
} }
} }
if !needMasterConnectionPoolerWorker(&newSpec.Spec) && if (needMasterConnectionPoolerWorker(&oldSpec.Spec) || needReplicaConnectionPoolerWorker(&oldSpec.Spec)) &&
!needReplicaConnectionPoolerWorker(&newSpec.Spec) { !needMasterConnectionPoolerWorker(&newSpec.Spec) && !needReplicaConnectionPoolerWorker(&newSpec.Spec) {
if err = c.deleteConnectionPoolerSecret(); err != nil { if err = c.deleteConnectionPoolerSecret(); err != nil {
c.logger.Warningf("could not remove connection pooler secret: %v", err) c.logger.Warningf("could not remove connection pooler secret: %v", err)
} }
@ -866,8 +859,6 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
newConnectionPooler = &acidv1.ConnectionPooler{} newConnectionPooler = &acidv1.ConnectionPooler{}
} }
c.logger.Infof("old: %+v, new %+v", oldConnectionPooler, newConnectionPooler)
var specSync bool var specSync bool
var specReason []string var specReason []string

View File

@ -19,7 +19,7 @@ import (
"k8s.io/client-go/kubernetes/fake" "k8s.io/client-go/kubernetes/fake"
) )
func mockInstallLookupFunction(schema string, user string, role PostgresRole) error { func mockInstallLookupFunction(schema string, user string) error {
return nil return nil
} }

View File

@ -351,10 +351,30 @@ func (c *Cluster) execCreateDatabaseSchema(databaseName, schemaName, dbOwner, sc
} }
// set default privileges for schema // set default privileges for schema
// the schemaOwner defines them for global database roles
c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName) c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName)
// if schemaOwner and dbOwner differ we know that <databaseName>_<schemaName> default roles were created
if schemaOwner != dbOwner { if schemaOwner != dbOwner {
c.execAlterSchemaDefaultPrivileges(schemaName, dbOwner, databaseName+"_"+schemaName) defaultUsers := c.Spec.PreparedDatabases[databaseName].PreparedSchemas[schemaName].DefaultUsers
c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner, databaseName+"_"+schemaName)
// define schema privileges of <databaseName>_<schemaName>_owner_user for global roles, too
if defaultUsers {
c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner+constants.UserRoleNameSuffix, databaseName)
}
// collect all possible owner roles and define default schema privileges
// for <databaseName>_<schemaName>_reader/writer roles
owners := c.getOwnerRoles(databaseName, c.Spec.PreparedDatabases[databaseName].DefaultUsers)
owners = append(owners, c.getOwnerRoles(databaseName+"_"+schemaName, defaultUsers)...)
for _, owner := range owners {
c.execAlterSchemaDefaultPrivileges(schemaName, owner, databaseName+"_"+schemaName)
}
} else {
// define schema privileges of <databaseName>_owner_user for global roles, too
if c.Spec.PreparedDatabases[databaseName].DefaultUsers {
c.execAlterSchemaDefaultPrivileges(schemaName, schemaOwner+constants.UserRoleNameSuffix, databaseName)
}
} }
return nil return nil
@ -418,6 +438,15 @@ func makeUserFlags(rolsuper, rolinherit, rolcreaterole, rolcreatedb, rolcanlogin
return result return result
} }
func (c *Cluster) getOwnerRoles(dbObjPath string, withUser bool) (owners []string) {
owners = append(owners, dbObjPath+constants.OwnerRoleNameSuffix)
if withUser {
owners = append(owners, dbObjPath+constants.OwnerRoleNameSuffix+constants.UserRoleNameSuffix)
}
return owners
}
// getExtension returns the list of current database extensions // getExtension returns the list of current database extensions
// The caller is responsible for opening and closing the database connection // The caller is responsible for opening and closing the database connection
func (c *Cluster) getExtensions() (dbExtensions map[string]string, err error) { func (c *Cluster) getExtensions() (dbExtensions map[string]string, err error) {
@ -479,7 +508,7 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi
// Creates a connection pool credentials lookup function in every database to // Creates a connection pool credentials lookup function in every database to
// perform remote authentication. // perform remote authentication.
func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error { func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
var stmtBytes bytes.Buffer var stmtBytes bytes.Buffer
c.logger.Info("Installing lookup function") c.logger.Info("Installing lookup function")
@ -575,8 +604,8 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role Po
c.logger.Infof("pooler lookup function installed into %s", dbname) c.logger.Infof("pooler lookup function installed into %s", dbname)
} }
if len(failedDatabases) == 0 { if len(failedDatabases) > 0 {
		c.ConnectionPooler[role].LookupFunction = true return fmt.Errorf("could not install pooler lookup function in every specified database")
} }
return nil return nil

View File

@ -412,13 +412,33 @@ func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]stri
// Those parameters must go to the bootstrap/dcs/postgresql/parameters section. // Those parameters must go to the bootstrap/dcs/postgresql/parameters section.
// See http://patroni.readthedocs.io/en/latest/dynamic_configuration.html. // See http://patroni.readthedocs.io/en/latest/dynamic_configuration.html.
func isBootstrapOnlyParameter(param string) bool { func isBootstrapOnlyParameter(param string) bool {
return param == "max_connections" || params := map[string]bool{
param == "max_locks_per_transaction" || "archive_command": false,
param == "max_worker_processes" || "shared_buffers": false,
param == "max_prepared_transactions" || "logging_collector": false,
param == "wal_level" || "log_destination": false,
param == "wal_log_hints" || "log_directory": false,
param == "track_commit_timestamp" "log_filename": false,
"log_file_mode": false,
"log_rotation_age": false,
"log_truncate_on_rotation": false,
"ssl": false,
"ssl_ca_file": false,
"ssl_crl_file": false,
"ssl_cert_file": false,
"ssl_key_file": false,
"shared_preload_libraries": false,
"bg_mon.listen_address": false,
"bg_mon.history_buckets": false,
"pg_stat_statements.track_utility": false,
"extwlist.extensions": false,
"extwlist.custom_path": false,
}
result, ok := params[param]
if !ok {
result = true
}
return result
} }
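The rewrite inverts the old allow-list: instead of naming the few parameters that must be applied through the DCS, it names the parameters that stay local to the instance (logging, SSL, shared_buffers, preload libraries) and treats everything else as bootstrap/DCS-only. A hedged sketch of how such a predicate is typically used when splitting manifest parameters into the two sections of the Spilo configuration; this is illustrative only, not the operator's generator code:

// splitManifestParameters is illustrative: parameters flagged as
// bootstrap-only are collected for the bootstrap/dcs/postgresql/parameters
// section, the remainder stays under the plain postgresql/parameters section.
func splitManifestParameters(manifest map[string]string) (local, dcs map[string]string) {
	local = make(map[string]string)
	dcs = make(map[string]string)
	for name, value := range manifest {
		if isBootstrapOnlyParameter(name) {
			dcs[name] = value
		} else {
			local[name] = value
		}
	}
	return local, dcs
}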
func generateVolumeMounts(volume acidv1.Volume) []v1.VolumeMount { func generateVolumeMounts(volume acidv1.Volume) []v1.VolumeMount {
@ -778,6 +798,12 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""}) envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
} }
if c.OpConfig.WALAZStorageAccount != "" {
envVars = append(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
}
if c.OpConfig.GCPCredentials != "" { if c.OpConfig.GCPCredentials != "" {
envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials}) envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
} }
@ -1150,9 +1176,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
} }
// generate the spilo container // generate the spilo container
c.logger.Debugf("Generating Spilo container, environment variables")
c.logger.Debugf("%v", spiloEnvVars)
spiloContainer := generateContainer(constants.PostgresContainerName, spiloContainer := generateContainer(constants.PostgresContainerName,
&effectiveDockerImage, &effectiveDockerImage,
resourceRequirements, resourceRequirements,
@ -1255,7 +1278,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
} }
if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size, if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size,
spec.Volume.StorageClass); err != nil { spec.Volume.StorageClass, spec.Volume.Selector); err != nil {
return nil, fmt.Errorf("could not generate volume claim template: %v", err) return nil, fmt.Errorf("could not generate volume claim template: %v", err)
} }
@ -1503,7 +1526,8 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
podSpec.Volumes = volumes podSpec.Volumes = volumes
} }
func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) { func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string,
volumeSelector *metav1.LabelSelector) (*v1.PersistentVolumeClaim, error) {
var storageClassName *string var storageClassName *string
@ -1536,6 +1560,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string
}, },
StorageClassName: storageClassName, StorageClassName: storageClassName,
VolumeMode: &volumeMode, VolumeMode: &volumeMode,
Selector: volumeSelector,
}, },
} }
@ -1547,10 +1572,11 @@ func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
namespace := c.Namespace namespace := c.Namespace
for username, pgUser := range c.pgUsers { for username, pgUser := range c.pgUsers {
//Skip users with no password i.e. human users (they'll be authenticated using pam) //Skip users with no password i.e. human users (they'll be authenticated using pam)
secret := c.generateSingleUserSecret(namespace, pgUser) secret := c.generateSingleUserSecret(pgUser.Namespace, pgUser)
if secret != nil { if secret != nil {
secrets[username] = secret secrets[username] = secret
} }
namespace = pgUser.Namespace
} }
/* special case for the system user */ /* special case for the system user */
for _, systemUser := range c.systemUsers { for _, systemUser := range c.systemUsers {
@ -1590,7 +1616,7 @@ func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser)
secret := v1.Secret{ secret := v1.Secret{
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: c.credentialSecretName(username), Name: c.credentialSecretName(username),
Namespace: namespace, Namespace: pgUser.Namespace,
Labels: lbls, Labels: lbls,
Annotations: c.annotationsSet(nil), Annotations: c.annotationsSet(nil),
}, },
@ -1785,6 +1811,14 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
}, },
} }
result = append(result, envs...) result = append(result, envs...)
} else if c.OpConfig.WALAZStorageAccount != "" {
envs := []v1.EnvVar{
{
Name: "CLONE_AZURE_STORAGE_ACCOUNT",
Value: c.OpConfig.WALAZStorageAccount,
},
}
result = append(result, envs...)
} else { } else {
c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.") c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.")
} }

View File

@ -1207,6 +1207,12 @@ func TestSidecars(t *testing.T) {
} }
spec = acidv1.PostgresSpec{ spec = acidv1.PostgresSpec{
PostgresqlParam: acidv1.PostgresqlParam{
PgVersion: "12.1",
Parameters: map[string]string{
"max_connections": "100",
},
},
TeamID: "myapp", NumberOfInstances: 1, TeamID: "myapp", NumberOfInstances: 1,
Resources: acidv1.Resources{ Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"}, ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
@ -1503,3 +1509,106 @@ func TestGenerateCapabilities(t *testing.T) {
} }
} }
} }
func TestVolumeSelector(t *testing.T) {
testName := "TestVolumeSelector"
makeSpec := func(volume acidv1.Volume) acidv1.PostgresSpec {
return acidv1.PostgresSpec{
TeamID: "myapp",
NumberOfInstances: 0,
Resources: acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
Volume: volume,
}
}
tests := []struct {
subTest string
volume acidv1.Volume
wantSelector *metav1.LabelSelector
}{
{
subTest: "PVC template has no selector",
volume: acidv1.Volume{
Size: "1G",
},
wantSelector: nil,
},
{
subTest: "PVC template has simple label selector",
volume: acidv1.Volume{
Size: "1G",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
},
},
wantSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
},
},
{
subTest: "PVC template has full selector",
volume: acidv1.Volume{
Size: "1G",
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "flavour",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"banana", "chocolate"},
},
},
},
},
wantSelector: &metav1.LabelSelector{
MatchLabels: map[string]string{"environment": "unittest"},
MatchExpressions: []metav1.LabelSelectorRequirement{
{
Key: "flavour",
Operator: metav1.LabelSelectorOpIn,
Values: []string{"banana", "chocolate"},
},
},
},
},
}
cluster := New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
ProtectedRoles: []string{"admin"},
Auth: config.Auth{
SuperUsername: superUserName,
ReplicationUsername: replicationUserName,
},
},
}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
for _, tt := range tests {
pgSpec := makeSpec(tt.volume)
sts, err := cluster.generateStatefulSet(&pgSpec)
if err != nil {
t.Fatalf("%s %s: no statefulset created %v", testName, tt.subTest, err)
}
volIdx := len(sts.Spec.VolumeClaimTemplates)
for i, ct := range sts.Spec.VolumeClaimTemplates {
if ct.ObjectMeta.Name == constants.DataVolumeName {
volIdx = i
break
}
}
if volIdx == len(sts.Spec.VolumeClaimTemplates) {
t.Errorf("%s %s: no datavolume found in sts", testName, tt.subTest)
}
selector := sts.Spec.VolumeClaimTemplates[volIdx].Spec.Selector
if !reflect.DeepEqual(selector, tt.wantSelector) {
t.Errorf("%s %s: expected: %#v but got: %#v", testName, tt.subTest, tt.wantSelector, selector)
}
}
}

View File

@ -304,8 +304,19 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
} }
masterCandidateName := util.NameFromMeta(masterCandidatePod.ObjectMeta) masterCandidateName := util.NameFromMeta(masterCandidatePod.ObjectMeta)
if err := c.Switchover(oldMaster, masterCandidateName); err != nil { err = retryutil.Retry(1*time.Minute, 5*time.Minute,
return fmt.Errorf("could not failover to pod %q: %v", masterCandidateName, err) func() (bool, error) {
err := c.Switchover(oldMaster, masterCandidateName)
if err != nil {
c.logger.Errorf("could not failover to pod %q: %v", masterCandidateName, err)
return false, nil
}
return true, nil
},
)
if err != nil {
return fmt.Errorf("could not migrate master pod: %v", err)
} }
return nil return nil

View File

@ -32,7 +32,7 @@ func (c *Cluster) listResources() error {
} }
for _, obj := range c.Secrets { for _, obj := range c.Secrets {
c.logger.Infof("found secret: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID) c.logger.Infof("found secret: %q (uid: %q) namesapce: %s", util.NameFromMeta(obj.ObjectMeta), obj.UID, obj.ObjectMeta.Namespace)
} }
for role, endpoint := range c.Endpoints { for role, endpoint := range c.Endpoints {

View File

@ -2,9 +2,12 @@ package cluster
import ( import (
"context" "context"
"encoding/json"
"fmt" "fmt"
"reflect"
"regexp" "regexp"
"strings" "strings"
"time"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/spec"
@ -260,13 +263,18 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
} }
func (c *Cluster) syncStatefulSet() error { func (c *Cluster) syncStatefulSet() error {
var (
masterPod *v1.Pod
postgresConfig map[string]interface{}
instanceRestartRequired bool
)
podsToRecreate := make([]v1.Pod, 0) podsToRecreate := make([]v1.Pod, 0)
switchoverCandidates := make([]spec.NamespacedName, 0) switchoverCandidates := make([]spec.NamespacedName, 0)
pods, err := c.listPods() pods, err := c.listPods()
if err != nil { if err != nil {
c.logger.Infof("could not list pods of the statefulset: %v", err) c.logger.Warnf("could not list pods of the statefulset: %v", err)
} }
// NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early. // NB: Be careful to consider the codepath that acts on podsRollingUpdateRequired before returning early.
@ -379,8 +387,48 @@ func (c *Cluster) syncStatefulSet() error {
// Apply special PostgreSQL parameters that can only be set via the Patroni API. // Apply special PostgreSQL parameters that can only be set via the Patroni API.
// it is important to do it after the statefulset pods are there, but before the rolling update // it is important to do it after the statefulset pods are there, but before the rolling update
// since those parameters require PostgreSQL restart. // since those parameters require PostgreSQL restart.
if err := c.checkAndSetGlobalPostgreSQLConfiguration(); err != nil { pods, err = c.listPods()
return fmt.Errorf("could not set cluster-wide PostgreSQL configuration options: %v", err) if err != nil {
c.logger.Warnf("could not get list of pods to apply special PostgreSQL parameters only to be set via Patroni API: %v", err)
}
// get Postgres config, compare with manifest and update via Patroni PATCH endpoint if it differs
// Patroni's config endpoint is just a "proxy" to DCS. It is enough to patch it only once and it doesn't matter which pod is used.
for i, pod := range pods {
podName := util.NameFromMeta(pods[i].ObjectMeta)
config, err := c.patroni.GetConfig(&pod)
if err != nil {
c.logger.Warningf("could not get Postgres config from pod %s: %v", podName, err)
continue
}
instanceRestartRequired, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, config)
if err != nil {
c.logger.Warningf("could not set PostgreSQL configuration options for pod %s: %v", podName, err)
continue
}
break
}
// if the config update requires a restart, call Patroni restart for replicas first, then master
if instanceRestartRequired {
c.logger.Debug("restarting Postgres server within pods")
ttl, ok := postgresConfig["ttl"].(int32)
if !ok {
ttl = 30
}
for i, pod := range pods {
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
if role == Master {
masterPod = &pods[i]
continue
}
c.restartInstance(&pod)
time.Sleep(time.Duration(ttl) * time.Second)
}
if masterPod != nil {
c.restartInstance(masterPod)
}
} }
// if we get here we also need to re-create the pods (either leftovers from the old // if we get here we also need to re-create the pods (either leftovers from the old
@ -396,6 +444,21 @@ func (c *Cluster) syncStatefulSet() error {
return nil return nil
} }
func (c *Cluster) restartInstance(pod *v1.Pod) {
podName := util.NameFromMeta(pod.ObjectMeta)
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", fmt.Sprintf("restarting Postgres server within %s pod %s", role, pod.Name))
if err := c.patroni.Restart(pod); err != nil {
c.logger.Warningf("could not restart Postgres server within %s pod %s: %v", role, podName, err)
return
}
c.logger.Debugf("Postgres server successfuly restarted in %s pod %s", role, podName)
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", fmt.Sprintf("Postgres server restart done for %s pod %s", role, pod.Name))
}
// AnnotationsToPropagate get the annotations to update if required // AnnotationsToPropagate get the annotations to update if required
// based on the annotations in postgres CRD // based on the annotations in postgres CRD
func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string { func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[string]string {
@ -429,46 +492,77 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri
} }
// checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters // checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
// (like max_connections) has changed and if necessary sets it via the Patroni API // (like max_connections) have changed and if necessary sets it via the Patroni API
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration() error { func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, patroniConfig map[string]interface{}) (bool, error) {
var ( configToSet := make(map[string]interface{})
err error parametersToSet := make(map[string]string)
pods []v1.Pod effectivePgParameters := make(map[string]interface{})
)
// we need to extract those options from the cluster manifest. // read effective Patroni config if set
optionsToSet := make(map[string]string) if patroniConfig != nil {
pgOptions := c.Spec.Parameters effectivePostgresql := patroniConfig["postgresql"].(map[string]interface{})
effectivePgParameters = effectivePostgresql[patroniPGParametersParameterName].(map[string]interface{})
}
for k, v := range pgOptions { // compare parameters under postgresql section with c.Spec.Postgresql.Parameters from manifest
if isBootstrapOnlyParameter(k) { desiredPgParameters := c.Spec.Parameters
optionsToSet[k] = v for desiredOption, desiredValue := range desiredPgParameters {
effectiveValue := effectivePgParameters[desiredOption]
if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) {
parametersToSet[desiredOption] = desiredValue
} }
} }
if len(optionsToSet) == 0 { if len(parametersToSet) > 0 {
return nil configToSet["postgresql"] = map[string]interface{}{patroniPGParametersParameterName: parametersToSet}
} }
if pods, err = c.listPods(); err != nil { // compare other options from config with c.Spec.Patroni from manifest
return err desiredPatroniConfig := c.Spec.Patroni
if desiredPatroniConfig.LoopWait > 0 && desiredPatroniConfig.LoopWait != uint32(patroniConfig["loop_wait"].(float64)) {
configToSet["loop_wait"] = desiredPatroniConfig.LoopWait
} }
if len(pods) == 0 { if desiredPatroniConfig.MaximumLagOnFailover > 0 && desiredPatroniConfig.MaximumLagOnFailover != float32(patroniConfig["maximum_lag_on_failover"].(float64)) {
return fmt.Errorf("could not call Patroni API: cluster has no pods") configToSet["maximum_lag_on_failover"] = desiredPatroniConfig.MaximumLagOnFailover
} }
if desiredPatroniConfig.PgHba != nil && !reflect.DeepEqual(desiredPatroniConfig.PgHba, (patroniConfig["pg_hba"])) {
configToSet["pg_hba"] = desiredPatroniConfig.PgHba
}
if desiredPatroniConfig.RetryTimeout > 0 && desiredPatroniConfig.RetryTimeout != uint32(patroniConfig["retry_timeout"].(float64)) {
configToSet["retry_timeout"] = desiredPatroniConfig.RetryTimeout
}
if desiredPatroniConfig.Slots != nil && !reflect.DeepEqual(desiredPatroniConfig.Slots, patroniConfig["slots"]) {
configToSet["slots"] = desiredPatroniConfig.Slots
}
if desiredPatroniConfig.SynchronousMode != patroniConfig["synchronous_mode"] {
configToSet["synchronous_mode"] = desiredPatroniConfig.SynchronousMode
}
if desiredPatroniConfig.SynchronousModeStrict != patroniConfig["synchronous_mode_strict"] {
configToSet["synchronous_mode_strict"] = desiredPatroniConfig.SynchronousModeStrict
}
if desiredPatroniConfig.TTL > 0 && desiredPatroniConfig.TTL != uint32(patroniConfig["ttl"].(float64)) {
configToSet["ttl"] = desiredPatroniConfig.TTL
}
if len(configToSet) == 0 {
return false, nil
}
configToSetJson, err := json.Marshal(configToSet)
if err != nil {
c.logger.Debugf("could not convert config patch to JSON: %v", err)
}
// try all pods until the first one that is successful, as it doesn't matter which pod // try all pods until the first one that is successful, as it doesn't matter which pod
// carries the request to change configuration through // carries the request to change configuration through
for _, pod := range pods {
podName := util.NameFromMeta(pod.ObjectMeta) podName := util.NameFromMeta(pod.ObjectMeta)
c.logger.Debugf("calling Patroni API on a pod %s to set the following Postgres options: %v", c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
podName, optionsToSet) podName, configToSetJson)
if err = c.patroni.SetPostgresParameters(&pod, optionsToSet); err == nil { if err = c.patroni.SetConfig(pod, configToSet); err != nil {
return nil return true, fmt.Errorf("could not patch postgres parameters with a pod %s: %v", podName, err)
} }
c.logger.Warningf("could not patch postgres parameters with a pod %s: %v", podName, err)
} return true, nil
return fmt.Errorf("could not reach Patroni API to set Postgres options: failed on every pod (%d total)",
len(pods))
} }
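For illustration, the patch assembled here is a plain nested map that is marshalled and handed to c.patroni.SetConfig, which PATCHes Patroni's /config endpoint. A hedged example of its shape for a manifest that lowers loop_wait and raises max_connections; the values are made up, and the real code uses the patroniPGParametersParameterName constant rather than the "parameters" literal shown here:

package cluster

import "encoding/json"

// examplePatroniConfigPatch shows the assumed shape of the /config patch
// document; the concrete values are invented for the example.
func examplePatroniConfigPatch() ([]byte, error) {
	patch := map[string]interface{}{
		"loop_wait": 5,
		"postgresql": map[string]interface{}{
			"parameters": map[string]string{
				"max_connections": "200",
			},
		},
	}
	// marshals to {"loop_wait":5,"postgresql":{"parameters":{"max_connections":"200"}}}
	return json.Marshal(patch)
}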
func (c *Cluster) syncSecrets() error { func (c *Cluster) syncSecrets() error {
@ -483,7 +577,7 @@ func (c *Cluster) syncSecrets() error {
for secretUsername, secretSpec := range secrets { for secretUsername, secretSpec := range secrets {
if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(context.TODO(), secretSpec, metav1.CreateOptions{}); err == nil { if secret, err = c.KubeClient.Secrets(secretSpec.Namespace).Create(context.TODO(), secretSpec, metav1.CreateOptions{}); err == nil {
c.Secrets[secret.UID] = secret c.Secrets[secret.UID] = secret
c.logger.Debugf("created new secret %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID) c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), secretSpec.Namespace, secret.UID)
continue continue
} }
if k8sutil.ResourceAlreadyExists(err) { if k8sutil.ResourceAlreadyExists(err) {
@ -521,7 +615,7 @@ func (c *Cluster) syncSecrets() error {
userMap[secretUsername] = pwdUser userMap[secretUsername] = pwdUser
} }
} else { } else {
return fmt.Errorf("could not create secret for user %s: %v", secretUsername, err) return fmt.Errorf("could not create secret for user %s: in namespace %s: %v", secretUsername, secretSpec.Namespace, err)
} }
} }
@ -556,11 +650,12 @@ func (c *Cluster) syncRoles() (err error) {
// create list of database roles to query // create list of database roles to query
for _, u := range c.pgUsers { for _, u := range c.pgUsers {
userNames = append(userNames, u.Name) pgRole := u.Name
userNames = append(userNames, pgRole)
// add team member role name with rename suffix in case we need to rename it back // add team member role name with rename suffix in case we need to rename it back
if u.Origin == spec.RoleOriginTeamsAPI && c.OpConfig.EnableTeamMemberDeprecation { if u.Origin == spec.RoleOriginTeamsAPI && c.OpConfig.EnableTeamMemberDeprecation {
deletedUsers[u.Name+c.OpConfig.RoleDeletionSuffix] = u.Name deletedUsers[pgRole+c.OpConfig.RoleDeletionSuffix] = pgRole
userNames = append(userNames, u.Name+c.OpConfig.RoleDeletionSuffix) userNames = append(userNames, pgRole+c.OpConfig.RoleDeletionSuffix)
} }
} }
@ -663,15 +758,27 @@ func (c *Cluster) syncDatabases() error {
} }
} }
if len(createDatabases) > 0 {
// trigger creation of pooler objects in new database in syncConnectionPooler
if c.ConnectionPooler != nil {
for _, role := range [2]PostgresRole{Master, Replica} {
c.ConnectionPooler[role].LookupFunction = false
}
}
}
// set default privileges for prepared database // set default privileges for prepared database
for _, preparedDatabase := range preparedDatabases { for _, preparedDatabase := range preparedDatabases {
if err := c.initDbConnWithName(preparedDatabase); err != nil { if err := c.initDbConnWithName(preparedDatabase); err != nil {
return fmt.Errorf("could not init database connection to %s", preparedDatabase) return fmt.Errorf("could not init database connection to %s", preparedDatabase)
} }
if err = c.execAlterGlobalDefaultPrivileges(preparedDatabase+constants.OwnerRoleNameSuffix, preparedDatabase); err != nil {
for _, owner := range c.getOwnerRoles(preparedDatabase, c.Spec.PreparedDatabases[preparedDatabase].DefaultUsers) {
if err = c.execAlterGlobalDefaultPrivileges(owner, preparedDatabase); err != nil {
return err return err
} }
} }
}
return nil return nil
} }

View File

@ -72,7 +72,7 @@ type ClusterStatus struct {
type TemplateParams map[string]interface{} type TemplateParams map[string]interface{}
type InstallFunction func(schema string, user string, role PostgresRole) error type InstallFunction func(schema string, user string) error
type SyncReason []string type SyncReason []string

View File

@ -96,13 +96,13 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {
var modifySize *int64 var modifySize *int64
var modifyType *string var modifyType *string
if targetValue.Iops != nil { if targetValue.Iops != nil && *targetValue.Iops >= int64(3000) {
if volume.Iops != *targetValue.Iops { if volume.Iops != *targetValue.Iops {
modifyIops = targetValue.Iops modifyIops = targetValue.Iops
} }
} }
if targetValue.Throughput != nil { if targetValue.Throughput != nil && *targetValue.Throughput >= int64(125) {
if volume.Throughput != *targetValue.Throughput { if volume.Throughput != *targetValue.Throughput {
modifyThroughput = targetValue.Throughput modifyThroughput = targetValue.Throughput
} }

View File

@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
result.EtcdHost = fromCRD.EtcdHost result.EtcdHost = fromCRD.EtcdHost
result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p7") result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.1-p1")
result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
result.MinInstances = fromCRD.MinInstances result.MinInstances = fromCRD.MinInstances
result.MaxInstances = fromCRD.MaxInstances result.MaxInstances = fromCRD.MaxInstances
@ -82,6 +82,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True()) result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret
result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName result.InfrastructureRolesSecretName = fromCRD.Kubernetes.InfrastructureRolesSecretName
if fromCRD.Kubernetes.InfrastructureRolesDefs != nil { if fromCRD.Kubernetes.InfrastructureRolesDefs != nil {
@ -145,6 +146,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole
result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket
result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials
result.WALAZStorageAccount = fromCRD.AWSGCP.WALAZStorageAccount
result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials") result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration
@ -152,7 +154,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
// logical backup config // logical backup config
result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *") result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.3") result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.7.0")
result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3") result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region

View File

@ -49,6 +49,7 @@ const (
type PgUser struct { type PgUser struct {
Origin RoleOrigin `yaml:"-"` Origin RoleOrigin `yaml:"-"`
Name string `yaml:"-"` Name string `yaml:"-"`
Namespace string `yaml:"-"`
Password string `yaml:"-"` Password string `yaml:"-"`
Flags []string `yaml:"user_flags"` Flags []string `yaml:"user_flags"`
MemberOf []string `yaml:"inrole"` MemberOf []string `yaml:"inrole"`

View File

@ -114,7 +114,7 @@ type Scalyr struct {
// LogicalBackup defines configuration for logical backup // LogicalBackup defines configuration for logical backup
type LogicalBackup struct { type LogicalBackup struct {
LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"`
LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.3"` LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.7.0"`
LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"` LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"`
LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""` LogicalBackupS3Bucket string `name:"logical_backup_s3_bucket" default:""`
LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""` LogicalBackupS3Region string `name:"logical_backup_s3_region" default:""`
@ -152,7 +152,7 @@ type Config struct {
WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"`
EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p7"` DockerImage string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.1-p1"`
SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
SidecarContainers []v1.Container `name:"sidecars"` SidecarContainers []v1.Container `name:"sidecars"`
PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"`
@ -167,6 +167,7 @@ type Config struct {
KubeIAMRole string `name:"kube_iam_role"` KubeIAMRole string `name:"kube_iam_role"`
WALGSBucket string `name:"wal_gs_bucket"` WALGSBucket string `name:"wal_gs_bucket"`
GCPCredentials string `name:"gcp_credentials"` GCPCredentials string `name:"gcp_credentials"`
WALAZStorageAccount string `name:"wal_az_storage_account"`
AdditionalSecretMount string `name:"additional_secret_mount"` AdditionalSecretMount string `name:"additional_secret_mount"`
AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"` AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"`
EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"` EnableEBSGp3Migration bool `name:"enable_ebs_gp3_migration" default:"false"`
@@ -207,6 +208,7 @@ type Config struct {
PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""` PostgresSuperuserTeams []string `name:"postgres_superuser_teams" default:""`
SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"` SetMemoryRequestToLimit bool `name:"set_memory_request_to_limit" default:"false"`
EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"` EnableLazySpiloUpgrade bool `name:"enable_lazy_spilo_upgrade" default:"false"`
EnableCrossNamespaceSecret bool `name:"enable_cross_namespace_secret" default:"false"`
EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"` EnablePgVersionEnvVar bool `name:"enable_pgversion_env_var" default:"true"`
EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"` EnableSpiloWalPathCompat bool `name:"enable_spilo_wal_path_compat" default:"false"`
MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"` MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"off"`
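Each Config field above carries a `name` tag (the option key) and, where it makes sense, a `default` tag. As a rough illustration of how such tags can be consumed, here is a minimal, hypothetical Go sketch; `demoConfig` and `applyDefaults` are invented for this example and are not the operator's actual config loader.

    package main

    import (
        "fmt"
        "reflect"
    )

    // demoConfig mirrors the tag style of the Config struct above
    // (a small, hypothetical subset -- not the operator's real type).
    type demoConfig struct {
        DockerImage                string `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.1-p1"`
        WALAZStorageAccount        string `name:"wal_az_storage_account"`
        EnableCrossNamespaceSecret bool   `name:"enable_cross_namespace_secret" default:"false"`
    }

    // applyDefaults fills empty string fields from their `default` tag and
    // resets booleans to their tagged default (a simplification for this
    // sketch). Fields without a default tag, such as wal_az_storage_account,
    // stay empty unless set explicitly.
    func applyDefaults(cfg *demoConfig) {
        v := reflect.ValueOf(cfg).Elem()
        t := v.Type()
        for i := 0; i < t.NumField(); i++ {
            def, ok := t.Field(i).Tag.Lookup("default")
            if !ok {
                continue
            }
            f := v.Field(i)
            switch f.Kind() {
            case reflect.String:
                if f.String() == "" {
                    f.SetString(def)
                }
            case reflect.Bool:
                f.SetBool(def == "true")
            }
        }
    }

    func main() {
        cfg := demoConfig{}
        applyDefaults(&cfg)
        fmt.Printf("%+v\n", cfg)
    }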

View File

@@ -19,6 +19,8 @@ import (
const ( const (
failoverPath = "/failover" failoverPath = "/failover"
configPath = "/config" configPath = "/config"
statusPath = "/patroni"
restartPath = "/restart"
apiPort = 8008 apiPort = 8008
timeout = 30 * time.Second timeout = 30 * time.Second
) )
@@ -28,6 +30,9 @@ type Interface interface {
Switchover(master *v1.Pod, candidate string) error Switchover(master *v1.Pod, candidate string) error
SetPostgresParameters(server *v1.Pod, options map[string]string) error SetPostgresParameters(server *v1.Pod, options map[string]string) error
GetMemberData(server *v1.Pod) (MemberData, error) GetMemberData(server *v1.Pod) (MemberData, error)
Restart(server *v1.Pod) error
GetConfig(server *v1.Pod) (map[string]interface{}, error)
SetConfig(server *v1.Pod, config map[string]interface{}) error
} }
// Patroni API client // Patroni API client
@@ -103,6 +108,32 @@ func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer)
return nil return nil
} }
func (p *Patroni) httpGet(url string) (string, error) {
request, err := http.NewRequest("GET", url, nil)
if err != nil {
return "", fmt.Errorf("could not create request: %v", err)
}
p.logger.Debugf("making GET http request: %s", request.URL.String())
resp, err := p.httpClient.Do(request)
if err != nil {
return "", fmt.Errorf("could not make request: %v", err)
}
bodyBytes, err := ioutil.ReadAll(resp.Body)
if err != nil {
return "", fmt.Errorf("could not read response: %v", err)
}
if err := resp.Body.Close(); err != nil {
return "", fmt.Errorf("could not close request: %v", err)
}
if resp.StatusCode != http.StatusOK {
return string(bodyBytes), fmt.Errorf("patroni returned '%d'", resp.StatusCode)
}
return string(bodyBytes), nil
}
// Switchover by calling Patroni REST API // Switchover by calling Patroni REST API
func (p *Patroni) Switchover(master *v1.Pod, candidate string) error { func (p *Patroni) Switchover(master *v1.Pod, candidate string) error {
buf := &bytes.Buffer{} buf := &bytes.Buffer{}
@@ -133,6 +164,20 @@ func (p *Patroni) SetPostgresParameters(server *v1.Pod, parameters map[string]st
return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf) return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
} }
// SetConfig sets Patroni options via a Patroni PATCH API call.
func (p *Patroni) SetConfig(server *v1.Pod, config map[string]interface{}) error {
buf := &bytes.Buffer{}
err := json.NewEncoder(buf).Encode(config)
if err != nil {
return fmt.Errorf("could not encode json: %v", err)
}
apiURLString, err := apiURL(server)
if err != nil {
return err
}
return p.httpPostOrPatch(http.MethodPatch, apiURLString+configPath, buf)
}
// MemberDataPatroni child element // MemberDataPatroni child element
type MemberDataPatroni struct { type MemberDataPatroni struct {
Version string `json:"version"` Version string `json:"version"`
@@ -149,6 +194,48 @@ type MemberData struct {
Patroni MemberDataPatroni `json:"patroni"` Patroni MemberDataPatroni `json:"patroni"`
} }
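// GetConfigOrStatus fetches the given Patroni API path and decodes the JSON response into a generic map.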
func (p *Patroni) GetConfigOrStatus(server *v1.Pod, path string) (map[string]interface{}, error) {
result := make(map[string]interface{})
apiURLString, err := apiURL(server)
if err != nil {
return result, err
}
body, err := p.httpGet(apiURLString + path)
if err != nil {
return result, err
}
err = json.Unmarshal([]byte(body), &result)
if err != nil {
return result, err
}
return result, err
}
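// GetStatus returns the Patroni member status from the /patroni endpoint.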
func (p *Patroni) GetStatus(server *v1.Pod) (map[string]interface{}, error) {
return p.GetConfigOrStatus(server, statusPath)
}
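// GetConfig returns the dynamic Patroni configuration from the /config endpoint.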
func (p *Patroni) GetConfig(server *v1.Pod) (map[string]interface{}, error) {
return p.GetConfigOrStatus(server, configPath)
}
// Restart restarts an instance via a Patroni POST API call, but only when Patroni reports a pending restart for that member.
func (p *Patroni) Restart(server *v1.Pod) error {
apiURLString, err := apiURL(server)
if err != nil {
return err
}
status, err := p.GetStatus(server)
if err != nil {
return err
}
// nothing to do unless Patroni flags the member as pending_restart
pendingRestart, ok := status["pending_restart"].(bool)
if !ok || !pendingRestart {
return nil
}
buf := &bytes.Buffer{}
if err := json.NewEncoder(buf).Encode(map[string]interface{}{"restart_pending": true}); err != nil {
return fmt.Errorf("could not encode json: %v", err)
}
return p.httpPostOrPatch(http.MethodPost, apiURLString+restartPath, buf)
}
// GetMemberData reads member data from the Patroni API // GetMemberData reads member data from the Patroni API
func (p *Patroni) GetMemberData(server *v1.Pod) (MemberData, error) { func (p *Patroni) GetMemberData(server *v1.Pod) (MemberData, error) {
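Taken together, the new GetConfig, SetConfig, and Restart methods let a caller reconcile Patroni's dynamic configuration and trigger a restart only when one is pending. The sketch below is illustrative, not operator code: the helper name, the import path, and the comparison logic are assumptions.

    package example

    import (
        "fmt"
        "reflect"

        v1 "k8s.io/api/core/v1"

        // import path assumed from the repository layout
        "github.com/zalando/postgres-operator/pkg/util/patroni"
    )

    // applyPatroniConfig is an illustrative helper: it reads the current
    // Patroni configuration, patches it via SetConfig when it differs from
    // the desired one, and then calls Restart, which the client above turns
    // into a no-op unless the member reports pending_restart.
    func applyPatroniConfig(c patroni.Interface, pod *v1.Pod, desired map[string]interface{}) error {
        current, err := c.GetConfig(pod)
        if err != nil {
            return fmt.Errorf("could not read Patroni config: %v", err)
        }
        if !reflect.DeepEqual(current, desired) {
            if err := c.SetConfig(pod, desired); err != nil {
                return fmt.Errorf("could not patch Patroni config: %v", err)
            }
        }
        return c.Restart(pod)
    }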

View File

@@ -71,3 +71,25 @@ spec:
"11" "11"
] ]
} }
# Example of settings to make the snapshot view work in the UI when using AWS
# - name: WALE_S3_ENDPOINT
# value: https+path://s3.us-east-1.amazonaws.com:443
# - name: SPILO_S3_BACKUP_PREFIX
# value: spilo/
# - name: AWS_ACCESS_KEY_ID
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_ACCESS_KEY_ID
# - name: AWS_SECRET_ACCESS_KEY
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_SECRET_ACCESS_KEY
# - name: AWS_DEFAULT_REGION
# valueFrom:
# secretKeyRef:
# name: <postgres operator secret with AWS token>
# key: AWS_DEFAULT_REGION
# - name: SPILO_S3_BACKUP_BUCKET
# value: <s3 bucket used by the operator>

View File

@@ -1,4 +1,4 @@
apiVersion: "networking.k8s.io/v1beta1" apiVersion: "networking.k8s.io/v1"
kind: "Ingress" kind: "Ingress"
metadata: metadata:
name: "postgres-operator-ui" name: "postgres-operator-ui"
@@ -10,6 +10,10 @@ spec:
- host: "ui.example.org" - host: "ui.example.org"
http: http:
paths: paths:
- backend: - path: /
serviceName: "postgres-operator-ui" pathType: ImplementationSpecific
servicePort: 80 backend:
service:
name: "postgres-operator-ui"
port:
number: 80