Merge branch 'master' into fes-support

commit 79bd69d3ab

@@ -9,7 +9,7 @@ assignees: ''

 Please answer some short questions which should help us understand your problem / question better.

-- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.6.3
+- **Which image of the operator are you using?** e.g. registry.opensource.zalan.do/acid/postgres-operator:v1.7.0
 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s]
 - **Are you running Postgres Operator in production?** [yes | no]
 - **Type of issue?** [Bug report, question, feature request, etc.]

@@ -66,7 +66,7 @@ We introduce the major version into the backup path to smoothen the [major version upgrade
 The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for wal segments in the current path but also old format paths.
 This comes at potential performance costs and should be disabled after a few days.

-The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.0-p7`
+The newest Spilo 13 image is: `registry.opensource.zalan.do/acid/spilo-13:2.1-p1`

 The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
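As a minimal sketch (assuming the ConfigMap-based operator configuration), the compatibility flag mentioned above would be enabled temporarily like this:

```yaml
# Hypothetical ConfigMap excerpt: let Spilo also search old-format WAL paths;
# disable again after a few days to avoid the potential performance cost
data:
  enable_spilo_wal_path_compat: "true"
```
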
@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator-ui
-version: 1.6.3
-appVersion: 1.6.3
+version: 1.7.0
+appVersion: 1.7.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:

@@ -1,10 +1,34 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v1
+    appVersion: 1.7.0
+    created: "2021-08-27T10:23:17.723412079+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: ad08ee5fe31bb2e7c3cc1299c2e778511a3c05305bc17357404b2615b32ea92a
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.7.0.tgz
+    version: 1.7.0
   - apiVersion: v1
     appVersion: 1.6.3
-    created: "2021-05-27T19:04:33.425637932+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    created: "2021-08-27T10:23:17.722255571+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
     digest: 08b810aa632dcc719e4785ef184e391267f7c460caa99677f2d00719075aac78
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -25,8 +49,9 @@ entries:
     version: 1.6.3
   - apiVersion: v1
     appVersion: 1.6.2
-    created: "2021-05-27T19:04:33.422124263+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    created: "2021-08-27T10:23:17.721712848+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
     digest: 14d1559bb0bd1e1e828f2daaaa6f6ac9ffc268d79824592c3589b55dd39241f6
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -47,8 +72,9 @@ entries:
     version: 1.6.2
   - apiVersion: v1
     appVersion: 1.6.1
-    created: "2021-05-27T19:04:33.419640902+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    created: "2021-08-27T10:23:17.721175629+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
     digest: 3d321352f2f1e7bb7450aa8876e3d818aa9f9da9bd4250507386f0490f2c1969
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -69,8 +95,9 @@ entries:
     version: 1.6.1
   - apiVersion: v1
     appVersion: 1.6.0
-    created: "2021-05-27T19:04:33.41788193+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    created: "2021-08-27T10:23:17.720655498+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
     digest: 1e0aa1e7db3c1daa96927ffbf6fdbcdb434562f961833cb5241ddbe132220ee4
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -91,8 +118,9 @@ entries:
     version: 1.6.0
   - apiVersion: v1
     appVersion: 1.5.0
-    created: "2021-05-27T19:04:33.416056821+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
+    created: "2021-08-27T10:23:17.720112359+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
     digest: c91ea39e6d51d57f4048fb1b6ec53b40823f2690eb88e4e4f1a036367b9fdd61
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -111,4 +139,4 @@ entries:
     urls:
     - postgres-operator-ui-1.5.0.tgz
     version: 1.5.0
-generated: "2021-05-27T19:04:33.41380858+02:00"
+generated: "2021-08-27T10:23:17.719397521+02:00"

Binary file not shown.

@@ -76,3 +76,6 @@ spec:
               "11"
             ]
           }
+          {{- if .Values.extraEnvs }}
+          {{- .Values.extraEnvs | toYaml | nindent 12 }}
+          {{- end }}

@@ -1,7 +1,10 @@
 {{- if .Values.ingress.enabled -}}
 {{- $fullName := include "postgres-operator-ui.fullname" . -}}
 {{- $svcPort := .Values.service.port -}}
-{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+
+{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
+apiVersion: networking.k8s.io/v1
+{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
 apiVersion: networking.k8s.io/v1beta1
 {{- else -}}
 apiVersion: extensions/v1beta1
@@ -37,9 +40,18 @@ spec:
       paths:
         {{- range .paths }}
         - path: {{ . }}
+          {{ if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion -}}
+          pathType: ImplementationSpecific
+          backend:
+            service:
+              name: {{ $fullName }}
+              port:
+                number: {{ $svcPort }}
+          {{- else -}}
           backend:
             serviceName: {{ $fullName }}
            servicePort: {{ $svcPort }}
+          {{- end -}}
        {{- end }}
   {{- end }}
 {{- end }}
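For orientation, a hedged sketch of what this template might render on Kubernetes >= 1.19; the release name, host and port below are hypothetical, not taken from the chart:

```yaml
# Hypothetical rendered Ingress for a release named "pgui"
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
  name: pgui-postgres-operator-ui
spec:
  rules:
  - host: pgui.example.org
    http:
      paths:
      - path: /
        pathType: ImplementationSpecific
        backend:
          service:
            name: pgui-postgres-operator-ui
            port:
              number: 80
```

On clusters between 1.14 and 1.19 the same values render the legacy `networking.k8s.io/v1beta1` form with `serviceName`/`servicePort` instead.
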
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator-ui
-  tag: v1.6.3
+  tag: v1.7.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -48,6 +48,36 @@ envs:
   teams:
   - "acid"

+# configure extra UI ENVs
+# Extra ENVs are written in kubernetes format and added "as is" to the pod's env variables
+# https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
+# https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#environment-variables
+# UI specific env variables can be found here: https://github.com/zalando/postgres-operator/blob/master/ui/operator_ui/main.py
+extraEnvs:
+  []
+  # Example of settings to make the snapshot view work in the UI when using AWS
+  # - name: WALE_S3_ENDPOINT
+  #   value: https+path://s3.us-east-1.amazonaws.com:443
+  # - name: SPILO_S3_BACKUP_PREFIX
+  #   value: spilo/
+  # - name: AWS_ACCESS_KEY_ID
+  #   valueFrom:
+  #     secretKeyRef:
+  #       name: <postgres operator secret with AWS token>
+  #       key: AWS_ACCESS_KEY_ID
+  # - name: AWS_SECRET_ACCESS_KEY
+  #   valueFrom:
+  #     secretKeyRef:
+  #       name: <postgres operator secret with AWS token>
+  #       key: AWS_SECRET_ACCESS_KEY
+  # - name: AWS_DEFAULT_REGION
+  #   valueFrom:
+  #     secretKeyRef:
+  #       name: <postgres operator secret with AWS token>
+  #       key: AWS_DEFAULT_REGION
+  # - name: SPILO_S3_BACKUP_BUCKET
+  #   value: <s3 bucket used by the operator>
+
 # configure UI service
 service:
   type: "ClusterIP"
@@ -59,7 +89,8 @@ service:
 # configure UI ingress. If needed: "enabled: true"
 ingress:
   enabled: false
-  annotations: {}
+  annotations:
+    {}
   # kubernetes.io/ingress.class: nginx
   # kubernetes.io/tls-acme: "true"
   hosts:

@@ -1,7 +1,7 @@
 apiVersion: v1
 name: postgres-operator
-version: 1.6.3
-appVersion: 1.6.3
+version: 1.7.0
+appVersion: 1.7.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:

@@ -65,7 +65,7 @@ spec:
             properties:
               docker_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p7"
+                default: "registry.opensource.zalan.do/acid/spilo-13:2.1-p1"
               enable_crd_validation:
                 type: boolean
                 default: true
@@ -395,12 +395,14 @@ spec:
                 type: string
               wal_s3_bucket:
                 type: string
+              wal_az_storage_account:
+                type: string
           logical_backup:
             type: object
             properties:
               logical_backup_docker_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+                default: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
               logical_backup_google_application_credentials:
                 type: string
               logical_backup_job_prefix:
@@ -535,7 +537,7 @@ spec:
                 default: "pooler"
               connection_pooler_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
+                default: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
               connection_pooler_max_db_connections:
                 type: integer
                 default: 60

@@ -394,6 +394,8 @@ spec:
                 type: boolean
               defaultRoles:
                 type: boolean
+              secretNamespace:
+                type: string
           replicaLoadBalancer:  # deprecated
             type: boolean
           resources:
@@ -591,6 +593,24 @@ spec:
             properties:
               iops:
                 type: integer
+              selector:
+                type: object
+                properties:
+                  matchExpressions:
+                    type: array
+                    items:
+                      type: object
+                      properties:
+                        key:
+                          type: string
+                        operator:
+                          type: string
+                        values:
+                          type: array
+                          items:
+                            type: string
+                  matchLabels:
+                    type: object
               size:
                 type: string
                 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'

@@ -1,10 +1,33 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v1
+    appVersion: 1.7.0
+    created: "2021-08-27T10:21:42.643185124+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 1c4a1d289188ef72e409892fd2b86c008a37420af04a9796a8829ff84ab09e61
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.7.0.tgz
+    version: 1.7.0
   - apiVersion: v1
     appVersion: 1.6.3
-    created: "2021-05-27T19:04:25.199523943+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    created: "2021-08-27T10:21:42.640069574+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
     digest: ea08f991bf23c9ad114bca98ebcbe3e2fa15beab163061399394905eaee89b35
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -24,8 +47,9 @@ entries:
     version: 1.6.3
   - apiVersion: v1
     appVersion: 1.6.2
-    created: "2021-05-27T19:04:25.198182197+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    created: "2021-08-27T10:21:42.638502739+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
     digest: d886f8a0879ca07d1e5246ee7bc55710e1c872f3977280fe495db6fc2057a7f4
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -45,8 +69,9 @@ entries:
     version: 1.6.2
   - apiVersion: v1
     appVersion: 1.6.1
-    created: "2021-05-27T19:04:25.19687586+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    created: "2021-08-27T10:21:42.636936467+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
     digest: 4ba5972cd486dcaa2d11c5613a6f97f6b7b831822e610fe9e10a57ea1db23556
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -66,8 +91,9 @@ entries:
     version: 1.6.1
   - apiVersion: v1
     appVersion: 1.6.0
-    created: "2021-05-27T19:04:25.195600766+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    created: "2021-08-27T10:21:42.63533527+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
     digest: f52149718ea364f46b4b9eec9a65f6253ad182bb78df541d14cd5277b9c8a8c3
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -87,8 +113,9 @@ entries:
     version: 1.6.0
   - apiVersion: v1
     appVersion: 1.5.0
-    created: "2021-05-27T19:04:25.193985032+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
+    created: "2021-08-27T10:21:42.632932257+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
     digest: 198351d5db52e65cdf383d6f3e1745d91ac1e2a01121f8476f8b1be728b09531
     home: https://github.com/zalando/postgres-operator
     keywords:
@@ -106,4 +133,4 @@ entries:
     urls:
     - postgres-operator-1.5.0.tgz
     version: 1.5.0
-generated: "2021-05-27T19:04:25.191897769+02:00"
+generated: "2021-08-27T10:21:42.631372502+02:00"

Binary file not shown.

@@ -57,18 +57,16 @@ Flatten nested config options when ConfigMap is used as ConfigTarget
 */}}
 {{- define "flattenValuesForConfigMap" }}
 {{- range $key, $value := . }}
-{{- if or (kindIs "string" $value) (kindIs "int" $value) }}
-{{ $key }}: {{ $value | quote }}
-{{- end }}
 {{- if kindIs "slice" $value }}
 {{ $key }}: {{ join "," $value | quote }}
-{{- end }}
-{{- if kindIs "map" $value }}
+{{- else if kindIs "map" $value }}
 {{- $list := list }}
 {{- range $subKey, $subValue := $value }}
 {{- $list = append $list (printf "%s:%s" $subKey $subValue) }}
 {{ $key }}: {{ join "," $list | quote }}
 {{- end }}
+{{- else }}
+{{ $key }}: {{ $value | quote }}
 {{- end }}
 {{- end }}
 {{- end }}
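For illustration, a hedged sketch of what this flattening produces; the keys and values below are hypothetical, not taken from the chart:

```yaml
# Hypothetical values.yaml excerpt (configTarget: ConfigMap)
configKubernetes:
  cluster_labels:              # map value -> joined into "key:value,..." pairs
    application: spilo
    team: acid
  enable_pod_antiaffinity: true

# Rendered ConfigMap data after flattening:
# cluster_labels: "application:spilo,team:acid"
# enable_pod_antiaffinity: "true"
```
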
@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.6.3
+  tag: v1.7.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -35,7 +35,7 @@ configGeneral:
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
   # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
   # min number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # max number of instances in Postgres cluster. -1 = no limit
@@ -268,10 +268,13 @@ configAwsOrGcp:
   # GCS bucket to use for shipping WAL segments with WAL-E
   # wal_gs_bucket: ""

+  # Azure Storage Account to use for shipping WAL segments with WAL-G
+  # wal_az_storage_account: ""
+
 # configure K8s cron job managed by the operator
 configLogicalBackup:
   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""
@@ -336,7 +339,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode

@@ -3,6 +3,21 @@
 Learn how to configure and manage the Postgres Operator in your Kubernetes (K8s)
 environment.

+## Upgrading the operator
+
+The Postgres Operator is upgraded by changing the docker image within the
+deployment. Before doing so, it is recommended to check the release notes
+for new configuration options or changed behavior you might want to reflect
+in the ConfigMap or config CRD. E.g. a new feature might get introduced which
+is enabled or disabled by default and you want to change it to the opposite
+with the corresponding flag option.
+
+When using helm, be aware that installing the new chart will not update the
+`Postgresql` and `OperatorConfiguration` CRDs. Make sure to update them
+beforehand with the provided manifests in the `crds` folder. Otherwise, you
+might face errors about new Postgres manifest or configuration options being
+unknown to the CRD schema validation.
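As a minimal sketch of such an upgrade, the change boils down to bumping the image on the operator's Deployment (the image tag below is taken from this diff; the surrounding Deployment shape is assumed):

```yaml
# Hypothetical excerpt of the operator Deployment after the upgrade
spec:
  template:
    spec:
      containers:
      - name: postgres-operator
        image: registry.opensource.zalan.do/acid/postgres-operator:v1.7.0
```
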
+
 ## Minor and major version upgrade

 Minor version upgrades for PostgreSQL are handled via updating the Spilo Docker
@@ -157,20 +172,26 @@ from numerous escape characters in the latter log entry, view it in CLI with
 `PodTemplate` used by the operator is yet to be updated with the default values
 used internally in K8s.

-The operator also support lazy updates of the Spilo image. That means the pod
-template of a PG cluster's stateful set is updated immediately with the new
-image, but no rolling update follows. This feature saves you a switchover - and
-hence downtime - when you know pods are re-started later anyway, for instance
-due to the node rotation. To force a rolling update, disable this mode by
-setting the `enable_lazy_spilo_upgrade` to `false` in the operator configuration
-and restart the operator pod. With the standard eager rolling updates the
-operator checks during Sync all pods run images specified in their respective
-statefulsets. The operator triggers a rolling upgrade for PG clusters that
-violate this condition.
-The StatefulSet is replaced if the following properties change:
-- annotations
-- volumeClaimTemplates
-- template volumes
-
-Changes in $SPILO\_CONFIGURATION under path bootstrap.dcs are ignored when
-StatefulSets are being compared, if there are changes under this path, they are
-applied through rest api interface and following restart of patroni instance
+The StatefulSet is replaced and a rolling update is triggered if the following
+properties differ between the old and new state:
+- container name, ports, image, resources, env, envFrom, securityContext and volumeMounts
+- template labels, annotations, service account, securityContext, affinity, priority class and termination grace period
+
+Note that changes in the `SPILO_CONFIGURATION` env variable under the
+`bootstrap.dcs` path are ignored for the diff. They will be applied through
+Patroni's rest api interface, following a restart of all instances.
+
+The operator also supports lazy updates of the Spilo image. In this case the
+StatefulSet is only updated, but no rolling update follows. This feature saves
+you a switchover - and hence downtime - when you know pods are re-started later
+anyway, for instance due to the node rotation. To force a rolling update,
+disable this mode by setting the `enable_lazy_spilo_upgrade` to `false` in the
+operator configuration and restart the operator pod.
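A minimal sketch of forcing eager rolling updates as described above, assuming the ConfigMap-based operator configuration:

```yaml
# Hypothetical ConfigMap excerpt: disable lazy Spilo image updates
data:
  enable_lazy_spilo_upgrade: "false"
```
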
+
 ## Delete protection via annotations
@@ -667,6 +688,12 @@ if it ends up in your specified WAL backup path:
 envdir "/run/etc/wal-e.d/env" /scripts/postgres_backup.sh "/home/postgres/pgdata/pgroot/data"
 ```

+You can also check if Spilo is able to find any backups:
+
+```bash
+envdir "/run/etc/wal-e.d/env" wal-g backup-list
+```
+
 Depending on the cloud storage provider different [environment variables](https://github.com/zalando/spilo/blob/master/ENVIRONMENT.rst)
 have to be set for Spilo. Not all of them are generated automatically by the
 operator by changing its configuration. In this case you have to use an
@@ -734,8 +761,15 @@ WALE_S3_ENDPOINT='https+path://s3.eu-central-1.amazonaws.com:443'
 WALE_S3_PREFIX=$WAL_S3_BUCKET/spilo/{WAL_BUCKET_SCOPE_PREFIX}{SCOPE}{WAL_BUCKET_SCOPE_SUFFIX}/wal/{PGVERSION}
 ```

-If the prefix is not specified Spilo will generate it from `WAL_S3_BUCKET`.
-When the `AWS_REGION` is set `AWS_ENDPOINT` and `WALE_S3_ENDPOINT` are
+The operator sets the prefix to an empty string so that Spilo will generate it
+from the configured `WAL_S3_BUCKET`.
+
+:warning: When you overwrite the configuration by defining `WAL_S3_BUCKET` in
+the [pod_environment_configmap](#custom-pod-environment-variables) you have
+to set `WAL_BUCKET_SCOPE_PREFIX = ""`, too. Otherwise Spilo will not find
+the physical backups on restore (next chapter).
+
+When the `AWS_REGION` is set, `AWS_ENDPOINT` and `WALE_S3_ENDPOINT` are
 generated automatically. `WALG_S3_PREFIX` is identical to `WALE_S3_PREFIX`.
 `SCOPE` is the Postgres cluster name.
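To make the warning above concrete, a hedged sketch of such an override; the ConfigMap name and bucket are hypothetical:

```yaml
# Hypothetical pod environment ConfigMap overriding the WAL bucket;
# WAL_BUCKET_SCOPE_PREFIX must be emptied as well or restores will not
# find the physical backups
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-pod-config
data:
  WAL_S3_BUCKET: my-custom-wal-bucket
  WAL_BUCKET_SCOPE_PREFIX: ""
```
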
@@ -808,6 +842,63 @@ pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
 ...
 ```

+### Azure setup
+
+To configure the operator on Azure these prerequisites are needed:
+
+* A storage account in the same region as the Kubernetes cluster.
+
+The configuration parameters that we will be using are:
+
+* `pod_environment_secret`
+* `wal_az_storage_account`
+
+1. Generate the K8s secret resource that will contain your storage account's
+access key. You will need a copy of this secret in every namespace in which
+you want to create postgresql clusters.
+
+The latest version of WAL-G (v1.0) supports the use of a SAS token, but you'll
+have to make do with the primary or secondary access key until the version of
+WAL-G is updated in the postgres-operator.
+
+```yaml
+apiVersion: v1
+kind: Secret
+metadata:
+  name: psql-backup-creds
+  namespace: default
+type: Opaque
+stringData:
+  AZURE_STORAGE_ACCESS_KEY: <primary or secondary access key>
+```
+
+2. Setup pod environment configmap that instructs the operator to use WAL-G,
+instead of WAL-E, for backup and restore.
+
+```yml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: pod-env-overrides
+  namespace: postgres-operator-system
+data:
+  # Any env variable used by spilo can be added
+  USE_WALG_BACKUP: "true"
+  USE_WALG_RESTORE: "true"
+  CLONE_USE_WALG_RESTORE: "true"
+```
+
+3. Setup your operator configuration values. With the `psql-backup-creds`
+and `pod-env-overrides` resources applied to your cluster, ensure that the
+operator's configuration is set up like the following:
+
+```yml
+...
+aws_or_gcp:
+  pod_environment_secret: "psql-backup-creds"
+  pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
+  wal_az_storage_account: "postgresbackupsbucket28302F2"  # name of storage account to save the WAL-G logs
+...
+```
+
 ### Restoring physical backups

 If cluster members have to be (re)initialized restoring physical backups
@@ -817,6 +908,36 @@ on one of the other running instances (preferably replicas if they do not lag
 behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
 clusters.

+If you need to provide a [custom clone environment](#custom-pod-environment-variables)
+copy existing variables about your setup (backup location, prefix, access
+keys etc.) and prepend the `CLONE_` prefix to get them copied to the correct
+directory within Spilo.
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: postgres-pod-config
+data:
+  AWS_REGION: "eu-west-1"
+  AWS_ACCESS_KEY_ID: "****"
+  AWS_SECRET_ACCESS_KEY: "****"
+  ...
+  CLONE_AWS_REGION: "eu-west-1"
+  CLONE_AWS_ACCESS_KEY_ID: "****"
+  CLONE_AWS_SECRET_ACCESS_KEY: "****"
+  ...
+```
+
+### Standby clusters
+
+The setup for [standby clusters](user.md#setting-up-a-standby-cluster) is very
+similar to cloning. At the moment, the operator only allows for streaming from
+the S3 WAL archive of the master specified in the manifest. Like with cloning,
+if you are using [additional environment variables](#custom-pod-environment-variables)
+to access your backup location you have to copy those variables and prepend the
+`STANDBY_` prefix for Spilo to find the backups and WAL files to stream.
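For illustration, a hedged sketch mirroring the `CLONE_` example above, only with the `STANDBY_` prefix; the values are placeholders:

```yaml
# Hypothetical ConfigMap entries for a standby cluster
data:
  STANDBY_AWS_REGION: "eu-west-1"
  STANDBY_AWS_ACCESS_KEY_ID: "****"
  STANDBY_AWS_SECRET_ACCESS_KEY: "****"
```
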
+
 ## Logical backups

 The operator can manage K8s cron jobs to run logical backups (SQL dumps) of
@@ -954,7 +1075,7 @@ make docker

 # build the image in the minikube docker env
 eval $(minikube docker-env)
-docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.6.3 .
+docker build -t registry.opensource.zalan.do/acid/postgres-operator-ui:v1.7.0 .

 # apply UI manifests next to a running Postgres Operator
 kubectl apply -f manifests/

@@ -109,7 +109,11 @@ These parameters are grouped directly under the `spec` key in the manifest.
   `SUPERUSER`, `REPLICATION`, `INHERIT`, `LOGIN`, `NOLOGIN`, `CREATEROLE`,
   `CREATEDB`, `BYPASSRLS`. A login user is created by default unless NOLOGIN is
   specified, in which case the operator creates a role. One can specify empty
-  flags by providing a JSON empty array '*[]*'. Optional.
+  flags by providing a JSON empty array '*[]*'. If the config option
+  `enable_cross_namespace_secrets` is enabled you can specify the namespace in
+  the user name in the form `{namespace}.{username}` and the operator will
+  create the K8s secret in that namespace. The part after the first `.` is
+  considered to be the user name. Optional.
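A short hedged sketch of the namespaced form described above; the namespace and user are hypothetical:

```yaml
# Hypothetical users section; requires enable_cross_namespace_secrets
# so that the secret is created in the "appspace" namespace
spec:
  users:
    appspace.db_user:
    - createdb
```
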

 * **databases**
   a map of database names to database owners for the databases that should be
@@ -185,6 +189,35 @@ These parameters are grouped directly under the `spec` key in the manifest.
   If you set the `all` special item, it will be mounted in all containers (postgres + sidecars).
   Else you can set the list of target containers in which the additional volumes will be mounted (e.g. postgres, telegraf)

+## Prepared Databases
+
+The operator can create databases with default owner, reader and writer roles
+without the need to specify them under `users` or `databases` sections. Those
+parameters are grouped under the `preparedDatabases` top-level key. For more
+information, see [user docs](../user.md#prepared-databases-with-roles-and-default-privileges).
+
+* **defaultUsers**
+  The operator will always create default `NOLOGIN` roles for defined prepared
+  databases, but if `defaultUsers` is set to `true` three additional `LOGIN`
+  roles with `_user` suffix will get created. Default is `false`.
+
+* **extensions**
+  map of extensions with target database schema that the operator will install
+  in the database. Optional.
+
+* **schemas**
+  map of schemas that the operator will create. Optional - if no schema is
+  listed, the operator will create a schema called `data`. Under each schema
+  key, it can be defined if `defaultRoles` (NOLOGIN) and `defaultUsers` (LOGIN)
+  roles shall be created that have schema-exclusive privileges. Both flags are
+  set to `false` by default.
+
+* **secretNamespace**
+  for each default LOGIN role the operator will create a secret. You can
+  specify the namespace in which these secrets will get created, if
+  `enable_cross_namespace_secrets` is set to `true` in the config. Otherwise,
+  the cluster namespace is used.
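A hedged sketch combining these options; the database, schema and namespace names are hypothetical, and `pg_partman: public` simply follows the documented "extension: target schema" map form:

```yaml
# Hypothetical preparedDatabases section
spec:
  preparedDatabases:
    foo:
      defaultUsers: true
      secretNamespace: appspace   # honored only with enable_cross_namespace_secrets
      extensions:
        pg_partman: public
      schemas:
        data: {}                  # default NOLOGIN roles only
        history:
          defaultUsers: true
```
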

 ## Postgres parameters

 Those parameters are grouped under the `postgresql` top-level key, which is
@@ -258,7 +291,9 @@ explanation of `ttl` and `loop_wait` parameters.

 Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
 for the Postgres container. They are grouped under the `resources` top-level
-key with subgroups `requests` and `limits`.
+key with subgroups `requests` and `limits`. The whole section is optional,
+however if you specify a request or limit you have to define everything
+(unless you have modified the default CRD schema validation).
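Since everything must be defined once you override anything, a minimal complete sketch (the values are hypothetical):

```yaml
# Hypothetical resources section with all four values set
spec:
  resources:
    requests:
      cpu: 100m
      memory: 100Mi
    limits:
      cpu: "1"
      memory: 500Mi
```
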
+
 ### Requests
@@ -266,11 +301,11 @@ CPU and memory requests for the Postgres container.

 * **cpu**
   CPU requests for the Postgres container. Optional, overrides the
-  `default_cpu_requests` operator configuration parameter. Optional.
+  `default_cpu_requests` operator configuration parameter.

 * **memory**
   memory requests for the Postgres container. Optional, overrides the
-  `default_memory_request` operator configuration parameter. Optional.
+  `default_memory_request` operator configuration parameter.

 ### Limits

@@ -278,11 +313,11 @@ CPU and memory limits for the Postgres container.

 * **cpu**
   CPU limits for the Postgres container. Optional, overrides the
-  `default_cpu_limits` operator configuration parameter. Optional.
+  `default_cpu_limits` operator configuration parameter.

 * **memory**
   memory limits for the Postgres container. Optional, overrides the
-  `default_memory_limits` operator configuration parameter. Optional.
+  `default_memory_limits` operator configuration parameter.

 ## Parameters defining how to clone the cluster from another one
@@ -364,6 +399,11 @@ properties of the persistent storage that stores Postgres data.
   When running the operator on AWS the latest generation of EBS volumes (`gp3`)
   allows for configuring the throughput in MB/s. Maximum is 1000. Optional.

+* **selector**
+  A label query over PVs to consider for binding. See the [Kubernetes
+  documentation](https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/)
+  for details on using `matchLabels` and `matchExpressions`. Optional.
+
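A hedged sketch of such a selector in the cluster manifest; the labels and values are hypothetical, mirroring the commented example in the complete manifest further below:

```yaml
# Hypothetical volume section pinning PVs by label
spec:
  volume:
    size: 10Gi
    selector:
      matchExpressions:
      - { key: flavour, operator: In, values: [ "banana", "chocolate" ] }
      matchLabels:
        environment: dev
```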
|
 ## Sidecar definitions

 Those parameters are defined under the `sidecars` key. They consist of a list

@@ -267,9 +267,7 @@ configuration they are grouped under the `kubernetes` key.
 * **enable_cross_namespace_secrets**
   To allow secrets in a namespace other than the Postgres cluster
   namespace. Once enabled, specify the namespace in the user name under the
-  `users` section in the form `{namespace}.{username}`. The operator will then
-  create the user secret in that namespace. The part after the first `.` is
-  considered to be the user name. The default is `false`.
+  `users` section in the form `{namespace}.{username}`. The default is `false`.

 * **enable_init_containers**
   global option to allow for creating init containers in the cluster manifest to
@@ -559,6 +557,12 @@ yet officially supported.
   [service accounts](https://cloud.google.com/kubernetes-engine/docs/tutorials/authenticating-to-cloud-platform).
   The default is empty.

+* **wal_az_storage_account**
+  Azure Storage Account to use for shipping WAL segments with WAL-G. The
+  storage account must exist and be accessible by Postgres pods. Note, only the
+  name of the storage account is required.
+  The default is empty.
+
 * **log_s3_bucket**
   S3 bucket to use for shipping Postgres daily logs. Works only with S3 on AWS.
   The bucket has to be present and accessible by Postgres pods. The default is
@@ -602,7 +606,7 @@ grouped under the `logical_backup` key.
   runs `pg_dumpall` on a replica if possible and uploads compressed results to
   an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
   The default image is the same image built with the Zalando-internal CI
-  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"

 * **logical_backup_google_application_credentials**
   Specifies the path of the google cloud service account json file. Default is empty.

docs/user.md (35 changed lines)

@@ -139,9 +139,9 @@ secret, without ever sharing it outside of the cluster.
 At the moment it is not possible to define membership of the manifest role in
 other roles.

-To define the secrets for the users in a different namespace than that of the cluster,
-one can set `enable_cross_namespace_secret` and declare the namespace for the
-secrets in the manifest in the following manner,
+To define the secrets for the users in a different namespace than that of the
+cluster, one can set `enable_cross_namespace_secret` and declare the namespace
+for the secrets in the manifest in the following manner:

 ```yaml
 spec:
@@ -150,7 +150,8 @@ spec:
     appspace.db_user:
     - createdb
 ```
-Here, anything before the first dot is taken as the namespace and the text after
+
+Here, anything before the first dot is considered the namespace and the text after
 the first dot is the username. Also, the postgres roles of these usernames would
 be in the form of `namespace.username`.

@@ -520,7 +521,7 @@ Then, the schemas are owned by the database owner, too.

 The roles described in the previous paragraph can be granted to LOGIN roles from
 the `users` section in the manifest. Optionally, the Postgres Operator can also
-create default LOGIN roles for the database an each schema individually. These
+create default LOGIN roles for the database and each schema individually. These
 roles will get the `_user` suffix and they inherit all rights from their NOLOGIN
 counterparts. Therefore, you cannot have `defaultRoles` set to `false` and enable
 `defaultUsers` at the same time.

@@ -550,6 +551,19 @@ Default access privileges are also defined for LOGIN roles on database and
 schema creation. This means they are currently not set when `defaultUsers`
 (or `defaultRoles` for schemas) are enabled at a later point in time.

+For all LOGIN roles the operator will create K8s secrets in the namespace
+specified in `secretNamespace`, if `enable_cross_namespace_secret` is set to
+`true` in the config. Otherwise, they are created in the same namespace as
+the Postgres cluster.
+
+```yaml
+spec:
+  preparedDatabases:
+    foo:
+      defaultUsers: true
+      secretNamespace: appspace
+```
+
 ### Schema `search_path` for default roles

 The schema [`search_path`](https://www.postgresql.org/docs/13/ddl-schemas.html#DDL-SCHEMAS-PATH)

@@ -719,20 +733,21 @@ spec:
   uid: "efd12e58-5786-11e8-b5a7-06148230260c"
   cluster: "acid-batman"
   timestamp: "2017-12-19T12:40:33+01:00"
+  s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
 ```

 Here `cluster` is the name of the source cluster that is going to be cloned. A new
 cluster will be cloned from S3, using the latest backup before the `timestamp`.
 Note that a time zone is required for `timestamp` in the format of +00:00 which
-is UTC. The `uid` field is also mandatory. The operator will use it to find a
-correct key inside an S3 bucket. You can find this field in the metadata of the
-source cluster:
+is UTC. You can specify the `s3_wal_path` of the source cluster or let the
+operator try to find it based on the configured `wal_[s3|gs]_bucket` and the
+specified `uid`. You can find the UID of the source cluster in its metadata:

 ```yaml
 apiVersion: acid.zalan.do/v1
 kind: postgresql
 metadata:
-  name: acid-test-cluster
+  name: acid-batman
   uid: efd12e58-5786-11e8-b5a7-06148230260c
 ```

@@ -785,7 +800,7 @@ no statefulset will be created.
 ```yaml
 spec:
   standby:
-    s3_wal_path: "s3 bucket path to the master"
+    s3_wal_path: "s3://<bucketname>/spilo/<source_db_cluster>/<UID>/wal/<PGVERSION>"
 ```

 At the moment, the operator only allows to stream from the WAL archive of the

@@ -9,7 +9,7 @@ metadata:
 #    "delete-date": "2020-08-31"  # can only be deleted on that day if "delete-date" key is configured
 #    "delete-clustername": "acid-test-cluster"  # can only be deleted when name matches if "delete-clustername" key is configured
 spec:
-  dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
+  dockerImage: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
   teamId: "acid"
   numberOfInstances: 2
   users:  # Application/Robot users
@@ -46,6 +46,12 @@ spec:
 #    storageClass: my-sc
 #    iops: 1000  # for EBS gp3
 #    throughput: 250  # in MB/s for EBS gp3
+#    selector:
+#      matchExpressions:
+#        - { key: flavour, operator: In, values: [ "banana", "chocolate" ] }
+#      matchLabels:
+#        environment: dev
+#        service: postgres
   additionalVolumes:
     - name: empty
       mountPath: /opt/empty
@@ -151,7 +157,7 @@ spec:
 #  - name: "telegraf-sidecar"
 #    image: "telegraf:latest"
 #    ports:
-#      name: metrics
+#      - name: metrics
 #      containerPort: 8094
 #      protocol: TCP
 #    resources:

@@ -16,7 +16,7 @@ data:
   # connection_pooler_default_cpu_request: "500m"
   # connection_pooler_default_memory_limit: 100Mi
   # connection_pooler_default_memory_request: 100Mi
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
   # connection_pooler_max_db_connections: 60
   # connection_pooler_mode: "transaction"
   # connection_pooler_number_of_instances: 2
@@ -32,7 +32,7 @@ data:
   # default_memory_request: 100Mi
   # delete_annotation_date_key: delete-date
   # delete_annotation_name_key: delete-clustername
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
   # downscaler_annotations: "deployment-time,downscaler/*"
   # enable_admin_role_for_users: "true"
   # enable_crd_validation: "true"
@@ -65,7 +65,7 @@ data:
   # inherited_labels: application,environment
   # kube_iam_role: ""
   # log_s3_bucket: ""
-  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+  logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
   # logical_backup_google_application_credentials: ""
   logical_backup_job_prefix: "logical-backup-"
   logical_backup_provider: "s3"
@@ -129,6 +129,7 @@ data:
   # team_api_role_configuration: "log_statement:all"
   # teams_api_url: http://fake-teams-api.default.svc.cluster.local
   # toleration: ""
+  # wal_az_storage_account: ""
   # wal_gs_bucket: ""
   # wal_s3_bucket: ""
   watched_namespace: "*"  # listen to all namespaces

@@ -23,7 +23,7 @@ spec:
       serviceAccountName: postgres-operator
       containers:
       - name: postgres-operator
-        image: registry.opensource.zalan.do/acid/pgbouncer:master-16
+        image: registry.opensource.zalan.do/acid/pgbouncer:master-18
         imagePullPolicy: IfNotPresent
         resources:
           requests:

@@ -61,7 +61,7 @@ spec:
             properties:
               docker_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/spilo-13:2.0-p7"
+                default: "registry.opensource.zalan.do/acid/spilo-13:2.1-p1"
               enable_crd_validation:
                 type: boolean
                 default: true
@@ -384,6 +384,8 @@ spec:
                 type: string
               log_s3_bucket:
                 type: string
+              wal_az_storage_account:
+                type: string
               wal_gs_bucket:
                 type: string
               wal_s3_bucket:
@@ -393,7 +395,7 @@ spec:
             properties:
               logical_backup_docker_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+                default: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
               logical_backup_google_application_credentials:
                 type: string
               logical_backup_job_prefix:
@@ -528,7 +530,7 @@ spec:
                 default: "pooler"
               connection_pooler_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
+                default: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
               connection_pooler_max_db_connections:
                 type: integer
                 default: 60

@@ -19,7 +19,7 @@ spec:
       serviceAccountName: postgres-operator
       containers:
      - name: postgres-operator
-        image: registry.opensource.zalan.do/acid/postgres-operator:v1.6.3
+        image: registry.opensource.zalan.do/acid/postgres-operator:v1.7.0
         imagePullPolicy: IfNotPresent
         resources:
           requests:

@@ -3,7 +3,7 @@ kind: OperatorConfiguration
 metadata:
   name: postgresql-operator-default-configuration
 configuration:
-  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.0-p7
+  docker_image: registry.opensource.zalan.do/acid/spilo-13:2.1-p1
   # enable_crd_validation: true
   # enable_lazy_spilo_upgrade: false
   enable_pgversion_env_var: true
@@ -121,10 +121,11 @@ configuration:
     # gcp_credentials: ""
     # kube_iam_role: ""
     # log_s3_bucket: ""
+    # wal_az_storage_account: ""
     # wal_gs_bucket: ""
     # wal_s3_bucket: ""
   logical_backup:
-    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.6.3"
+    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.7.0"
     # logical_backup_google_application_credentials: ""
     logical_backup_job_prefix: "logical-backup-"
     logical_backup_provider: "s3"
@@ -165,7 +166,7 @@ configuration:
     connection_pooler_default_cpu_request: "500m"
     connection_pooler_default_memory_limit: 100Mi
     connection_pooler_default_memory_request: 100Mi
-    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-16"
+    connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-18"
     # connection_pooler_max_db_connections: 60
     connection_pooler_mode: "transaction"
     connection_pooler_number_of_instances: 2

@@ -390,6 +390,8 @@ spec:
                 type: boolean
               defaultRoles:
                 type: boolean
+              secretNamespace:
+                type: string
           replicaLoadBalancer:  # deprecated
             type: boolean
           resources:
@@ -587,6 +589,24 @@ spec:
             properties:
               iops:
                 type: integer
+              selector:
+                type: object
+                properties:
+                  matchExpressions:
+                    type: array
+                    items:
+                      type: object
+                      properties:
+                        key:
+                          type: string
+                        operator:
+                          type: string
+                        values:
+                          type: array
+                          items:
+                            type: string
+                  matchLabels:
+                    type: object
               size:
                 type: string
                 pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'

@@ -573,6 +573,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
                 },
             },
         },
+        "secretNamespace": {
+            Type: "string",
+        },
     },
 },
@@ -894,6 +897,54 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
 "iops": {
     Type: "integer",
 },
+"selector": {
+    Type: "object",
+    Properties: map[string]apiextv1.JSONSchemaProps{
+        "matchExpressions": {
+            Type: "array",
+            Items: &apiextv1.JSONSchemaPropsOrArray{
+                Schema: &apiextv1.JSONSchemaProps{
+                    Type:     "object",
+                    Required: []string{"key", "operator", "values"},
+                    Properties: map[string]apiextv1.JSONSchemaProps{
+                        "key": {
+                            Type: "string",
+                        },
+                        "operator": {
+                            Type: "string",
+                            Enum: []apiextv1.JSON{
+                                {
+                                    Raw: []byte(`"In"`),
+                                },
+                                {
+                                    Raw: []byte(`"NotIn"`),
+                                },
+                                {
+                                    Raw: []byte(`"Exists"`),
+                                },
+                                {
+                                    Raw: []byte(`"DoesNotExist"`),
+                                },
+                            },
+                        },
+                        "values": {
+                            Type: "array",
+                            Items: &apiextv1.JSONSchemaPropsOrArray{
+                                Schema: &apiextv1.JSONSchemaProps{
+                                    Type: "string",
+                                },
+                            },
+                        },
+                    },
+                },
+            },
+        },
+        "matchLabels": {
+            Type:                   "object",
+            XPreserveUnknownFields: util.True(),
+        },
+    },
+},
 "size": {
     Type:        "string",
     Description: "Value must not be zero",

@@ -132,6 +132,7 @@ type AWSGCPConfiguration struct {
 AWSRegion             string `json:"aws_region,omitempty"`
 WALGSBucket           string `json:"wal_gs_bucket,omitempty"`
 GCPCredentials        string `json:"gcp_credentials,omitempty"`
+WALAZStorageAccount   string `json:"wal_az_storage_account,omitempty"`
 LogS3Bucket           string `json:"log_s3_bucket,omitempty"`
 KubeIAMRole           string `json:"kube_iam_role,omitempty"`
 AdditionalSecretMount string `json:"additional_secret_mount,omitempty"`

@@ -96,6 +96,7 @@ type PreparedDatabase struct {
 PreparedSchemas map[string]PreparedSchema `json:"schemas,omitempty"`
 DefaultUsers    bool                      `json:"defaultUsers,omitempty" defaults:"false"`
 Extensions      map[string]string         `json:"extensions,omitempty"`
+SecretNamespace string                    `json:"secretNamespace,omitempty"`
 }

 // PreparedSchema describes elements to be bootstrapped per schema
@@ -114,6 +115,7 @@ type MaintenanceWindow struct {

 // Volume describes a single volume in the manifest.
 type Volume struct {
+Selector     *metav1.LabelSelector `json:"selector,omitempty"`
 Size         string                `json:"size"`
 StorageClass string                `json:"storageClass,omitempty"`
 SubPath      string                `json:"subPath,omitempty"`

@@ -29,6 +29,7 @@ package v1
 import (
     config "github.com/zalando/postgres-operator/pkg/util/config"
     corev1 "k8s.io/api/core/v1"
+    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
     runtime "k8s.io/apimachinery/pkg/runtime"
 )
@@ -314,22 +315,6 @@ func (in *MaintenanceWindow) DeepCopy() *MaintenanceWindow {
     return out
 }

-// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
-func (in *MajorVersionUpgradeConfiguration) DeepCopyInto(out *MajorVersionUpgradeConfiguration) {
-    *out = *in
-    return
-}
-
-// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MajorVersionUpgradeConfiguration.
-func (in *MajorVersionUpgradeConfiguration) DeepCopy() *MajorVersionUpgradeConfiguration {
-    if in == nil {
-        return nil
-    }
-    out := new(MajorVersionUpgradeConfiguration)
-    in.DeepCopyInto(out)
-    return out
-}
-
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *OperatorConfiguration) DeepCopyInto(out *OperatorConfiguration) {
     *out = *in
@@ -385,7 +370,6 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
         }
     }
     out.PostgresUsersConfiguration = in.PostgresUsersConfiguration
-    out.MajorVersionUpgrade = in.MajorVersionUpgrade
     in.Kubernetes.DeepCopyInto(&out.Kubernetes)
     out.PostgresPodResources = in.PostgresPodResources
     out.Timeouts = in.Timeouts
@@ -1234,6 +1218,11 @@ func (in UserFlags) DeepCopy() UserFlags {
 // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
 func (in *Volume) DeepCopyInto(out *Volume) {
     *out = *in
+    if in.Selector != nil {
+        in, out := &in.Selector, &out.Selector
+        *out = new(metav1.LabelSelector)
+        (*in).DeepCopyInto(*out)
+    }
     if in.Iops != nil {
         in, out := &in.Iops, &out.Iops
         *out = new(int64)

@@ -1019,9 +1019,9 @@ func (c *Cluster) initSystemUsers() {
     // Connection pooler user is an exception, if requested it's going to be
     // created by operator as a normal pgUser
     if needConnectionPooler(&c.Spec) {
-        // initialize empty connection pooler if not done yet
-        if c.Spec.ConnectionPooler == nil {
-            c.Spec.ConnectionPooler = &acidv1.ConnectionPooler{}
+        connectionPoolerSpec := c.Spec.ConnectionPooler
+        if connectionPoolerSpec == nil {
+            connectionPoolerSpec = &acidv1.ConnectionPooler{}
         }

         // Using superuser as pooler user is not a good idea. First of all it's
@@ -1029,13 +1029,13 @@ func (c *Cluster) initSystemUsers() {
         // and second it's a bad practice.
         username := c.OpConfig.ConnectionPooler.User

-        isSuperUser := c.Spec.ConnectionPooler.User == c.OpConfig.SuperUsername
+        isSuperUser := connectionPoolerSpec.User == c.OpConfig.SuperUsername
         isProtectedUser := c.shouldAvoidProtectedOrSystemRole(
-            c.Spec.ConnectionPooler.User, "connection pool role")
+            connectionPoolerSpec.User, "connection pool role")

         if !isSuperUser && !isProtectedUser {
             username = util.Coalesce(
-                c.Spec.ConnectionPooler.User,
+                connectionPoolerSpec.User,
                 c.OpConfig.ConnectionPooler.User)
         }
@@ -1107,11 +1107,11 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
         }

         // default roles per database
-        if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String()); err != nil {
+        if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
             return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
         }
         if preparedDB.DefaultUsers {
-            if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String()); err != nil {
+            if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
                 return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
             }
         }
@@ -1122,14 +1122,14 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
             if err := c.initDefaultRoles(defaultRoles,
                 preparedDbName+constants.OwnerRoleNameSuffix,
                 preparedDbName+"_"+preparedSchemaName,
-                constants.DefaultSearchPath+", "+preparedSchemaName); err != nil {
+                constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
                 return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
             }
             if preparedSchema.DefaultUsers {
                 if err := c.initDefaultRoles(defaultUsers,
                     preparedDbName+constants.OwnerRoleNameSuffix,
                     preparedDbName+"_"+preparedSchemaName,
-                    constants.DefaultSearchPath+", "+preparedSchemaName); err != nil {
+                    constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
                     return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
                 }
             }
@@ -1139,10 +1139,19 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
     return nil
 }

-func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix string, searchPath string) error {
+func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix, searchPath, secretNamespace string) error {

     for defaultRole, inherits := range defaultRoles {
+        namespace := c.Namespace
+        // if namespaced secrets are allowed
+        if secretNamespace != "" {
+            if c.Config.OpConfig.EnableCrossNamespaceSecret {
+                namespace = secretNamespace
+            } else {
+                c.logger.Warn("secretNamespace ignored because enable_cross_namespace_secret set to false. Creating secrets in cluster namespace.")
+            }
+        }
         roleName := prefix + defaultRole

         flags := []string{constants.RoleFlagNoLogin}
@@ -1165,7 +1174,7 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
         newRole := spec.PgUser{
             Origin:     spec.RoleOriginBootstrap,
             Name:       roleName,
-            Namespace:  c.Namespace,
+            Namespace:  namespace,
             Password:   util.RandomPassword(constants.PasswordLength),
             Flags:      flags,
             MemberOf:   memberOf,

@@ -3,6 +3,7 @@ package cluster

 import (
 	"context"
 	"fmt"
 	"reflect"
 	"strings"

+	"github.com/r3labs/diff"
@@ -60,7 +61,7 @@ func needMasterConnectionPooler(spec *acidv1.PostgresSpec) bool {
 }

 func needMasterConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
-	return (nil != spec.EnableConnectionPooler && *spec.EnableConnectionPooler) ||
+	return (spec.EnableConnectionPooler != nil && *spec.EnableConnectionPooler) ||
 		(spec.ConnectionPooler != nil && spec.EnableConnectionPooler == nil)
 }
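The rewritten predicate spells out the two opt-in paths: an explicitly true `enableConnectionPooler` flag, or a `connectionPooler` section present with the flag left unset. A self-contained sketch of the same truth table (the type and function names are stand-ins, not the acidv1 API):

```go
package main

import "fmt"

type poolerSection struct{} // stand-in for acidv1.ConnectionPooler

// needMasterPooler mirrors the predicate above: an explicit true flag wins,
// and a present connectionPooler section with an unset flag also opts in.
func needMasterPooler(enableFlag *bool, section *poolerSection) bool {
	return (enableFlag != nil && *enableFlag) ||
		(section != nil && enableFlag == nil)
}

func main() {
	yes, no := true, false
	fmt.Println(needMasterPooler(&yes, nil))             // true: explicit opt-in
	fmt.Println(needMasterPooler(nil, &poolerSection{})) // true: section implies opt-in
	fmt.Println(needMasterPooler(&no, &poolerSection{})) // false: explicit opt-out wins
}
```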
@@ -114,7 +115,7 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncRe
 	c.setProcessName("creating connection pooler")

 	// this is essentially sync with nil as oldSpec
-	if reason, err := c.syncConnectionPooler(nil, &c.Postgresql, LookupFunction); err != nil {
+	if reason, err := c.syncConnectionPooler(&acidv1.Postgresql{}, &c.Postgresql, LookupFunction); err != nil {
 		return reason, err
 	}
 	return reason, nil
@@ -140,11 +141,15 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncRe
 // RESERVE_SIZE is how many additional connections to allow for a pooler.
 func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
 	spec := &c.Spec
+	connectionPoolerSpec := spec.ConnectionPooler
+	if connectionPoolerSpec == nil {
+		connectionPoolerSpec = &acidv1.ConnectionPooler{}
+	}
 	effectiveMode := util.Coalesce(
-		spec.ConnectionPooler.Mode,
+		connectionPoolerSpec.Mode,
 		c.OpConfig.ConnectionPooler.Mode)

-	numberOfInstances := spec.ConnectionPooler.NumberOfInstances
+	numberOfInstances := connectionPoolerSpec.NumberOfInstances
 	if numberOfInstances == nil {
 		numberOfInstances = util.CoalesceInt32(
 			c.OpConfig.ConnectionPooler.NumberOfInstances,
@@ -152,7 +157,7 @@ func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
 	}

 	effectiveMaxDBConn := util.CoalesceInt32(
-		spec.ConnectionPooler.MaxDBConnections,
+		connectionPoolerSpec.MaxDBConnections,
 		c.OpConfig.ConnectionPooler.MaxDBConnections)

 	if effectiveMaxDBConn == nil {
@@ -201,17 +206,21 @@ func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
 func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 	*v1.PodTemplateSpec, error) {
 	spec := &c.Spec
+	connectionPoolerSpec := spec.ConnectionPooler
+	if connectionPoolerSpec == nil {
+		connectionPoolerSpec = &acidv1.ConnectionPooler{}
+	}
 	gracePeriod := int64(c.OpConfig.PodTerminateGracePeriod.Seconds())
 	resources, err := generateResourceRequirements(
-		spec.ConnectionPooler.Resources,
+		connectionPoolerSpec.Resources,
 		makeDefaultConnectionPoolerResources(&c.OpConfig))

 	effectiveDockerImage := util.Coalesce(
-		spec.ConnectionPooler.DockerImage,
+		connectionPoolerSpec.DockerImage,
 		c.OpConfig.ConnectionPooler.Image)

 	effectiveSchema := util.Coalesce(
-		spec.ConnectionPooler.Schema,
+		connectionPoolerSpec.Schema,
 		c.OpConfig.ConnectionPooler.Schema)

 	if err != nil {
@@ -220,7 +229,7 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (

 	secretSelector := func(key string) *v1.SecretKeySelector {
 		effectiveUser := util.Coalesce(
-			spec.ConnectionPooler.User,
+			connectionPoolerSpec.User,
 			c.OpConfig.ConnectionPooler.User)

 		return &v1.SecretKeySelector{
@@ -285,6 +294,8 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 		},
 	}

+	tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
+
 	podTemplate := &v1.PodTemplateSpec{
 		ObjectMeta: metav1.ObjectMeta{
 			Labels: c.connectionPoolerLabels(role, true).MatchLabels,
@@ -294,12 +305,18 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
 		Spec: v1.PodSpec{
 			TerminationGracePeriodSeconds: &gracePeriod,
 			Containers:                    []v1.Container{poolerContainer},
-			// TODO: add tolerations to scheduler pooler on the same node
-			// as database
-			//Tolerations: *tolerationsSpec,
+			Tolerations:                   tolerationsSpec,
 		},
 	}

+	nodeAffinity := nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
+	if c.OpConfig.EnablePodAntiAffinity {
+		labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
+		podTemplate.Spec.Affinity = generatePodAffinity(labelsSet, c.OpConfig.PodAntiAffinityTopologyKey, nodeAffinity)
+	} else if nodeAffinity != nil {
+		podTemplate.Spec.Affinity = nodeAffinity
+	}
+
 	return podTemplate, nil
 }
@@ -313,12 +330,13 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
 	// default values, initialize it to an empty structure. It could be done
 	// anywhere, but here is the earliest common entry point between sync and
 	// create code, so init here.
-	if spec.ConnectionPooler == nil {
-		spec.ConnectionPooler = &acidv1.ConnectionPooler{}
+	connectionPoolerSpec := spec.ConnectionPooler
+	if connectionPoolerSpec == nil {
+		connectionPoolerSpec = &acidv1.ConnectionPooler{}
 	}
 	podTemplate, err := c.generateConnectionPoolerPodTemplate(connectionPooler.Role)

-	numberOfInstances := spec.ConnectionPooler.NumberOfInstances
+	numberOfInstances := connectionPoolerSpec.NumberOfInstances
 	if numberOfInstances == nil {
 		numberOfInstances = util.CoalesceInt32(
 			c.OpConfig.ConnectionPooler.NumberOfInstances,
@@ -363,16 +381,6 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *Connectio
 func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {

 	spec := &c.Spec
-	// there are two ways to enable connection pooler, either to specify a
-	// connectionPooler section or enableConnectionPooler. In the second case
-	// spec.connectionPooler will be nil, so to make it easier to calculate
-	// default values, initialize it to an empty structure. It could be done
-	// anywhere, but here is the earliest common entry point between sync and
-	// create code, so init here.
-	if spec.ConnectionPooler == nil {
-		spec.ConnectionPooler = &acidv1.ConnectionPooler{}
-	}
-
 	serviceSpec := v1.ServiceSpec{
 		Ports: []v1.ServicePort{
 			{
@@ -660,12 +668,14 @@ func makeDefaultConnectionPoolerResources(config *config.Config) acidv1.Resource

 func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql) {
 	var v []string

 	var input []*bool

+	newMasterConnectionPoolerEnabled := needMasterConnectionPoolerWorker(&newSpec.Spec)
 	if oldSpec == nil {
-		input = []*bool{nil, nil, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler}
+		input = []*bool{nil, nil, &newMasterConnectionPoolerEnabled, newSpec.Spec.EnableReplicaConnectionPooler}
 	} else {
-		input = []*bool{oldSpec.Spec.EnableConnectionPooler, oldSpec.Spec.EnableReplicaConnectionPooler, newSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableReplicaConnectionPooler}
+		oldMasterConnectionPoolerEnabled := needMasterConnectionPoolerWorker(&oldSpec.Spec)
+		input = []*bool{&oldMasterConnectionPoolerEnabled, oldSpec.Spec.EnableReplicaConnectionPooler, &newMasterConnectionPoolerEnabled, newSpec.Spec.EnableReplicaConnectionPooler}
 	}

 	for _, b := range input {
@@ -676,25 +686,16 @@ func logPoolerEssentials(log *logrus.Entry, oldSpec, newSpec *acidv1.Postgresql)
 		}
 	}

-	log.Debugf("syncing connection pooler from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3])
+	log.Debugf("syncing connection pooler (master, replica) from (%v, %v) to (%v, %v)", v[0], v[1], v[2], v[3])
 }

 func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, LookupFunction InstallFunction) (SyncReason, error) {

 	var reason SyncReason
 	var err error
-	var newNeedConnectionPooler, oldNeedConnectionPooler bool
-	oldNeedConnectionPooler = false
+	var connectionPoolerNeeded bool

-	if oldSpec == nil {
-		oldSpec = &acidv1.Postgresql{
-			Spec: acidv1.PostgresSpec{
-				ConnectionPooler: &acidv1.ConnectionPooler{},
-			},
-		}
-	}
-
-	needSync, _ := needSyncConnectionPoolerSpecs(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler, c.logger)
+	needSync := !reflect.DeepEqual(oldSpec.Spec.ConnectionPooler, newSpec.Spec.ConnectionPooler)
+	masterChanges, err := diff.Diff(oldSpec.Spec.EnableConnectionPooler, newSpec.Spec.EnableConnectionPooler)
+	if err != nil {
+		c.logger.Error("Error in getting diff of master connection pooler changes")
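`needSyncConnectionPoolerSpecs` gives way to a plain `reflect.DeepEqual` check plus `diff.Diff` changelogs for the enable flags. A minimal sketch of how such a changelog flags a toggled setting, assuming `github.com/r3labs/diff`'s `Diff(a, b)` returns a slice of changes (empty when the values are equal):

```go
package main

import (
	"fmt"

	"github.com/r3labs/diff"
)

func main() {
	// A toggled boolean, the way EnableConnectionPooler can flip between
	// manifests: a non-empty changelog means the flag changed and the
	// pooler sync must not be skipped.
	oldEnabled, newEnabled := true, false
	changes, err := diff.Diff(&oldEnabled, &newEnabled)
	if err != nil {
		fmt.Println("diff failed:", err)
		return
	}
	fmt.Println(len(changes) > 0) // true: the pooler flag changed
}
```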
@@ -704,15 +705,14 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
 		c.logger.Error("Error in getting diff of replica connection pooler changes")
 	}

-	// skip pooler sync only
-	// 1. if there is no diff in spec, AND
-	// 2. if connection pooler is already there and is also required as per newSpec
-	//
-	// Handling the case when connectionPooler is not there but it is required
+	// skip pooler sync when there's no diff or it's deactivated
+	// but, handling the case when connectionPooler is not there but it is required
 	// as per spec, hence do not skip syncing in that case, even though there
 	// is no diff in specs
 	if (!needSync && len(masterChanges) <= 0 && len(replicaChanges) <= 0) &&
-		(c.ConnectionPooler != nil && (needConnectionPooler(&newSpec.Spec))) {
+		((!needConnectionPooler(&newSpec.Spec) && (c.ConnectionPooler == nil || !needConnectionPooler(&oldSpec.Spec))) ||
+			(c.ConnectionPooler != nil && needConnectionPooler(&newSpec.Spec) &&
+				(c.ConnectionPooler[Master].LookupFunction || c.ConnectionPooler[Replica].LookupFunction))) {
 		c.logger.Debugln("syncing pooler is not required")
 		return nil, nil
 	}
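The new skip condition packs several cases into one expression. A hypothetical restatement with named inputs (not operator code) that may make the two branches easier to audit:

```go
package main

import "fmt"

// skipPoolerSync restates the condition above with named inputs:
// specChanged     — needSync or any enable-flag changelog entries
// neededBefore    — needConnectionPooler(&oldSpec.Spec)
// neededNow       — needConnectionPooler(&newSpec.Spec)
// objectsExist    — c.ConnectionPooler != nil
// lookupInstalled — either role's LookupFunction flag is set
func skipPoolerSync(specChanged, neededBefore, neededNow, objectsExist, lookupInstalled bool) bool {
	if specChanged {
		return false
	}
	stillDisabled := !neededNow && (!objectsExist || !neededBefore)
	stillHealthy := objectsExist && neededNow && lookupInstalled
	return stillDisabled || stillHealthy
}

func main() {
	// no spec change, pooler stays enabled, lookup function installed: skip
	fmt.Println(skipPoolerSync(false, true, true, true, true)) // true
	// pooler newly required but no objects yet: do not skip
	fmt.Println(skipPoolerSync(false, false, true, false, false)) // false
}
```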
@@ -723,15 +723,9 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
 	for _, role := range [2]PostgresRole{Master, Replica} {

 		if role == Master {
-			newNeedConnectionPooler = needMasterConnectionPoolerWorker(&newSpec.Spec)
-			if oldSpec != nil {
-				oldNeedConnectionPooler = needMasterConnectionPoolerWorker(&oldSpec.Spec)
-			}
+			connectionPoolerNeeded = needMasterConnectionPoolerWorker(&newSpec.Spec)
 		} else {
-			newNeedConnectionPooler = needReplicaConnectionPoolerWorker(&newSpec.Spec)
-			if oldSpec != nil {
-				oldNeedConnectionPooler = needReplicaConnectionPoolerWorker(&oldSpec.Spec)
-			}
+			connectionPoolerNeeded = needReplicaConnectionPoolerWorker(&newSpec.Spec)
 		}

 		// if the call is via createConnectionPooler, then it is required to initialize
@@ -751,24 +745,22 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
 			}
 		}

-		if newNeedConnectionPooler {
+		if connectionPoolerNeeded {
 			// Try to sync in any case. If we didn't need connection pooler before,
 			// it means we want to create it. If it was already present, still sync
 			// since it could happen that there is no difference in specs, and all
 			// the resources are remembered, but the deployment was manually deleted
 			// in between

-			// in this case also do not forget to install lookup function as for
-			// creating cluster
-			if !oldNeedConnectionPooler || !c.ConnectionPooler[role].LookupFunction {
-				newConnectionPooler := newSpec.Spec.ConnectionPooler
-
+			// in this case also do not forget to install lookup function
+			if !c.ConnectionPooler[role].LookupFunction {
+				connectionPooler := c.Spec.ConnectionPooler
 				specSchema := ""
 				specUser := ""

-				if newConnectionPooler != nil {
-					specSchema = newConnectionPooler.Schema
-					specUser = newConnectionPooler.User
+				if connectionPooler != nil {
+					specSchema = connectionPooler.Schema
+					specUser = connectionPooler.User
 				}

 				schema := util.Coalesce(
@@ -779,9 +771,10 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
 					specUser,
 					c.OpConfig.ConnectionPooler.User)

-				if err = LookupFunction(schema, user, role); err != nil {
+				if err = LookupFunction(schema, user); err != nil {
 					return NoSync, err
 				}
+				c.ConnectionPooler[role].LookupFunction = true
 			}

 			if reason, err = c.syncConnectionPoolerWorker(oldSpec, newSpec, role); err != nil {
@@ -800,8 +793,8 @@ func (c *Cluster) syncConnectionPooler(oldSpec, newSpec *acidv1.Postgresql, Look
 			}
 		}
 	}
-	if !needMasterConnectionPoolerWorker(&newSpec.Spec) &&
-		!needReplicaConnectionPoolerWorker(&newSpec.Spec) {
+	if (needMasterConnectionPoolerWorker(&oldSpec.Spec) || needReplicaConnectionPoolerWorker(&oldSpec.Spec)) &&
+		!needMasterConnectionPoolerWorker(&newSpec.Spec) && !needReplicaConnectionPoolerWorker(&newSpec.Spec) {
 		if err = c.deleteConnectionPoolerSecret(); err != nil {
 			c.logger.Warningf("could not remove connection pooler secret: %v", err)
 		}
@@ -866,8 +859,6 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
 		newConnectionPooler = &acidv1.ConnectionPooler{}
 	}

-	c.logger.Infof("old: %+v, new %+v", oldConnectionPooler, newConnectionPooler)
-
 	var specSync bool
 	var specReason []string
@@ -19,7 +19,7 @@ import (
 	"k8s.io/client-go/kubernetes/fake"
 )

-func mockInstallLookupFunction(schema string, user string, role PostgresRole) error {
+func mockInstallLookupFunction(schema string, user string) error {
 	return nil
 }
@@ -508,7 +508,7 @@ func (c *Cluster) execCreateOrAlterExtension(extName, schemaName, statement, doi

 // Creates a connection pool credentials lookup function in every database to
 // perform remote authentication.
-func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role PostgresRole) error {
+func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string) error {
 	var stmtBytes bytes.Buffer

 	c.logger.Info("Installing lookup function")
@@ -604,8 +604,8 @@ func (c *Cluster) installLookupFunction(poolerSchema, poolerUser string, role Po
 		c.logger.Infof("pooler lookup function installed into %s", dbname)
 	}

-	if len(failedDatabases) == 0 {
-		c.ConnectionPooler[role].LookupFunction = true
+	if len(failedDatabases) > 0 {
+		return fmt.Errorf("could not install pooler lookup function in every specified databases")
 	}

 	return nil
@@ -798,6 +798,12 @@ func (c *Cluster) generateSpiloPodEnvVars(uid types.UID, spiloConfiguration stri
 		envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
 	}

+	if c.OpConfig.WALAZStorageAccount != "" {
+		envVars = append(envVars, v1.EnvVar{Name: "AZURE_STORAGE_ACCOUNT", Value: c.OpConfig.WALAZStorageAccount})
+		envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_SUFFIX", Value: getBucketScopeSuffix(string(uid))})
+		envVars = append(envVars, v1.EnvVar{Name: "WAL_BUCKET_SCOPE_PREFIX", Value: ""})
+	}
+
 	if c.OpConfig.GCPCredentials != "" {
 		envVars = append(envVars, v1.EnvVar{Name: "GOOGLE_APPLICATION_CREDENTIALS", Value: c.OpConfig.GCPCredentials})
 	}
@@ -1170,9 +1176,6 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	}

 	// generate the spilo container
-	c.logger.Debugf("Generating Spilo container, environment variables")
-	c.logger.Debugf("%v", spiloEnvVars)
-
 	spiloContainer := generateContainer(constants.PostgresContainerName,
 		&effectiveDockerImage,
 		resourceRequirements,
@@ -1275,7 +1278,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
 	}

 	if volumeClaimTemplate, err = generatePersistentVolumeClaimTemplate(spec.Volume.Size,
-		spec.Volume.StorageClass); err != nil {
+		spec.Volume.StorageClass, spec.Volume.Selector); err != nil {
 		return nil, fmt.Errorf("could not generate volume claim template: %v", err)
 	}
@@ -1523,7 +1526,8 @@ func (c *Cluster) addAdditionalVolumes(podSpec *v1.PodSpec,
 	podSpec.Volumes = volumes
 }

-func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string) (*v1.PersistentVolumeClaim, error) {
+func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string,
+	volumeSelector *metav1.LabelSelector) (*v1.PersistentVolumeClaim, error) {

 	var storageClassName *string
@@ -1556,6 +1560,7 @@ func generatePersistentVolumeClaimTemplate(volumeSize, volumeStorageClass string
 			},
 			StorageClassName: storageClassName,
 			VolumeMode:       &volumeMode,
+			Selector:         volumeSelector,
 		},
 	}
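With `Selector` wired through, a claim generated from the manifest will only bind volumes whose labels match. A sketch of the resulting template under the core/v1 API of that era (`pvcTemplate` and the `pgdata` name are illustrative, not operator code):

```go
package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// pvcTemplate builds a claim template whose optional label selector
// restricts which PersistentVolumes the claim may bind.
func pvcTemplate(size string, selector *metav1.LabelSelector) (*v1.PersistentVolumeClaim, error) {
	quantity, err := resource.ParseQuantity(size)
	if err != nil {
		return nil, fmt.Errorf("could not parse volume size: %v", err)
	}
	return &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: "pgdata"},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{v1.ResourceStorage: quantity},
			},
			Selector: selector, // nil keeps the old behaviour: no PV filtering
		},
	}, nil
}

func main() {
	sel := &metav1.LabelSelector{MatchLabels: map[string]string{"environment": "unittest"}}
	pvc, _ := pvcTemplate("1Gi", sel)
	fmt.Println(pvc.Spec.Selector.MatchLabels)
}
```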
@@ -1806,6 +1811,14 @@ func (c *Cluster) generateCloneEnvironment(description *acidv1.CloneDescription)
 			},
 		}
 		result = append(result, envs...)
+	} else if c.OpConfig.WALAZStorageAccount != "" {
+		envs := []v1.EnvVar{
+			{
+				Name:  "CLONE_AZURE_STORAGE_ACCOUNT",
+				Value: c.OpConfig.WALAZStorageAccount,
+			},
+		}
+		result = append(result, envs...)
 	} else {
 		c.logger.Error("Cannot figure out S3 or GS bucket. Both are empty.")
 	}
@@ -1509,3 +1509,106 @@ func TestGenerateCapabilities(t *testing.T) {
 		}
 	}
 }
+
+func TestVolumeSelector(t *testing.T) {
+	testName := "TestVolumeSelector"
+	makeSpec := func(volume acidv1.Volume) acidv1.PostgresSpec {
+		return acidv1.PostgresSpec{
+			TeamID:            "myapp",
+			NumberOfInstances: 0,
+			Resources: acidv1.Resources{
+				ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+				ResourceLimits:   acidv1.ResourceDescription{CPU: "1", Memory: "10"},
+			},
+			Volume: volume,
+		}
+	}
+
+	tests := []struct {
+		subTest      string
+		volume       acidv1.Volume
+		wantSelector *metav1.LabelSelector
+	}{
+		{
+			subTest: "PVC template has no selector",
+			volume: acidv1.Volume{
+				Size: "1G",
+			},
+			wantSelector: nil,
+		},
+		{
+			subTest: "PVC template has simple label selector",
+			volume: acidv1.Volume{
+				Size: "1G",
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{"environment": "unittest"},
+				},
+			},
+			wantSelector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"environment": "unittest"},
+			},
+		},
+		{
+			subTest: "PVC template has full selector",
+			volume: acidv1.Volume{
+				Size: "1G",
+				Selector: &metav1.LabelSelector{
+					MatchLabels: map[string]string{"environment": "unittest"},
+					MatchExpressions: []metav1.LabelSelectorRequirement{
+						{
+							Key:      "flavour",
+							Operator: metav1.LabelSelectorOpIn,
+							Values:   []string{"banana", "chocolate"},
+						},
+					},
+				},
+			},
+			wantSelector: &metav1.LabelSelector{
+				MatchLabels: map[string]string{"environment": "unittest"},
+				MatchExpressions: []metav1.LabelSelectorRequirement{
+					{
+						Key:      "flavour",
+						Operator: metav1.LabelSelectorOpIn,
+						Values:   []string{"banana", "chocolate"},
+					},
+				},
+			},
+		},
+	}
+
+	cluster := New(
+		Config{
+			OpConfig: config.Config{
+				PodManagementPolicy: "ordered_ready",
+				ProtectedRoles:      []string{"admin"},
+				Auth: config.Auth{
+					SuperUsername:       superUserName,
+					ReplicationUsername: replicationUserName,
+				},
+			},
+		}, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder)
+
+	for _, tt := range tests {
+		pgSpec := makeSpec(tt.volume)
+		sts, err := cluster.generateStatefulSet(&pgSpec)
+		if err != nil {
+			t.Fatalf("%s %s: no statefulset created %v", testName, tt.subTest, err)
+		}
+
+		volIdx := len(sts.Spec.VolumeClaimTemplates)
+		for i, ct := range sts.Spec.VolumeClaimTemplates {
+			if ct.ObjectMeta.Name == constants.DataVolumeName {
+				volIdx = i
+				break
+			}
+		}
+		if volIdx == len(sts.Spec.VolumeClaimTemplates) {
+			t.Errorf("%s %s: no datavolume found in sts", testName, tt.subTest)
+		}
+
+		selector := sts.Spec.VolumeClaimTemplates[volIdx].Spec.Selector
+		if !reflect.DeepEqual(selector, tt.wantSelector) {
+			t.Errorf("%s %s: expected: %#v but got: %#v", testName, tt.subTest, tt.wantSelector, selector)
+		}
+	}
+}
@@ -746,6 +746,15 @@ func (c *Cluster) syncDatabases() error {
 		}
 	}

+	if len(createDatabases) > 0 {
+		// trigger creation of pooler objects in new database in syncConnectionPooler
+		if c.ConnectionPooler != nil {
+			for _, role := range [2]PostgresRole{Master, Replica} {
+				c.ConnectionPooler[role].LookupFunction = false
+			}
+		}
+	}
+
 	// set default privileges for prepared database
 	for _, preparedDatabase := range preparedDatabases {
 		if err := c.initDbConnWithName(preparedDatabase); err != nil {
@@ -72,7 +72,7 @@ type ClusterStatus struct {

 type TemplateParams map[string]interface{}

-type InstallFunction func(schema string, user string, role PostgresRole) error
+type InstallFunction func(schema string, user string) error

 type SyncReason []string
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
 	result.EtcdHost = fromCRD.EtcdHost
 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.0-p7")
+	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-13:2.1-p1")
 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances
@@ -146,6 +146,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.KubeIAMRole = fromCRD.AWSGCP.KubeIAMRole
 	result.WALGSBucket = fromCRD.AWSGCP.WALGSBucket
 	result.GCPCredentials = fromCRD.AWSGCP.GCPCredentials
+	result.WALAZStorageAccount = fromCRD.AWSGCP.WALAZStorageAccount
 	result.AdditionalSecretMount = fromCRD.AWSGCP.AdditionalSecretMount
 	result.AdditionalSecretMountPath = util.Coalesce(fromCRD.AWSGCP.AdditionalSecretMountPath, "/meta/credentials")
 	result.EnableEBSGp3Migration = fromCRD.AWSGCP.EnableEBSGp3Migration
@@ -153,7 +154,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

 	// logical backup config
 	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.6.3")
+	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.7.0")
 	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
 	result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
@@ -114,7 +114,7 @@ type Scalyr struct {

 // LogicalBackup defines configuration for logical backup
 type LogicalBackup struct {
 	LogicalBackupSchedule    string `name:"logical_backup_schedule" default:"30 00 * * *"`
-	LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.6.3"`
+	LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"registry.opensource.zalan.do/acid/logical-backup:v1.7.0"`
 	LogicalBackupProvider    string `name:"logical_backup_provider" default:"s3"`
 	LogicalBackupS3Bucket    string `name:"logical_backup_s3_bucket" default:""`
 	LogicalBackupS3Region    string `name:"logical_backup_s3_region" default:""`
@@ -152,7 +152,7 @@ type Config struct {
 	WatchedNamespace        string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 	KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
 	EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage             string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.0-p7"`
+	DockerImage             string            `name:"docker_image" default:"registry.opensource.zalan.do/acid/spilo-13:2.1-p1"`
 	SidecarImages           map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
 	SidecarContainers       []v1.Container    `name:"sidecars"`
 	PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`
@@ -167,6 +167,7 @@ type Config struct {
 	KubeIAMRole               string `name:"kube_iam_role"`
 	WALGSBucket               string `name:"wal_gs_bucket"`
 	GCPCredentials            string `name:"gcp_credentials"`
+	WALAZStorageAccount       string `name:"wal_az_storage_account"`
 	AdditionalSecretMount     string `name:"additional_secret_mount"`
 	AdditionalSecretMountPath string `name:"additional_secret_mount_path" default:"/meta/credentials"`
 	EnableEBSGp3Migration     bool   `name:"enable_ebs_gp3_migration" default:"false"`
@@ -1,6 +1,6 @@
 package httpclient

-//go:generate mockgen -package mocks -destination=$PWD/mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor
+//go:generate mockgen -package mocks -destination=../../../mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor

 import "net/http"
@@ -1,6 +1,6 @@
 package volumes

-//go:generate mockgen -package mocks -destination=$PWD/mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor
+//go:generate mockgen -package mocks -destination=../../../mocks/$GOFILE -source=$GOFILE -build_flags=-mod=vendor

 import v1 "k8s.io/api/core/v1"
@@ -71,3 +71,25 @@ spec:
             "11"
           ]
         }
+        # Example of settings to make the snapshot view work in the UI when using AWS
+        # - name: WALE_S3_ENDPOINT
+        #   value: https+path://s3.us-east-1.amazonaws.com:443
+        # - name: SPILO_S3_BACKUP_PREFIX
+        #   value: spilo/
+        # - name: AWS_ACCESS_KEY_ID
+        #   valueFrom:
+        #     secretKeyRef:
+        #       name: <postgres operator secret with AWS token>
+        #       key: AWS_ACCESS_KEY_ID
+        # - name: AWS_SECRET_ACCESS_KEY
+        #   valueFrom:
+        #     secretKeyRef:
+        #       name: <postgres operator secret with AWS token>
+        #       key: AWS_SECRET_ACCESS_KEY
+        # - name: AWS_DEFAULT_REGION
+        #   valueFrom:
+        #     secretKeyRef:
+        #       name: <postgres operator secret with AWS token>
+        #       key: AWS_DEFAULT_REGION
+        # - name: SPILO_S3_BACKUP_BUCKET
+        #   value: <s3 bucket used by the operator>
@@ -1,4 +1,4 @@
-apiVersion: "networking.k8s.io/v1beta1"
+apiVersion: "networking.k8s.io/v1"
 kind: "Ingress"
 metadata:
   name: "postgres-operator-ui"
@@ -10,6 +10,10 @@ spec:
     - host: "ui.example.org"
       http:
         paths:
-          - backend:
-              serviceName: "postgres-operator-ui"
-              servicePort: 80
+          - path: /
+            pathType: ImplementationSpecific
+            backend:
+              service:
+                name: "postgres-operator-ui"
+                port:
+                  number: 80