Merge branch 'master' into gh-pages
commit a426508965
@@ -17,6 +17,8 @@ jobs:
         go-version: "^1.17.4"
     - name: Make dependencies
       run: make deps mocks
+    - name: Code generation
+      run: make codegen
     - name: Compile
       run: make linux
     - name: Run unit tests
@@ -26,6 +26,7 @@ _testmain.go
 *.test
 *.prof
 /vendor/
 /kubectl-pg/vendor/
 /build/
 /docker/build/
+/github.com/
@@ -1,2 +1,2 @@
 # global owners
-* @sdudoladov @Jan-M @CyberDem0n @FxKu @jopadi @idanovinda
+* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet
@@ -3,3 +3,4 @@ Felix Kunde <felix.kunde@zalando.de>
 Jan Mussler <jan.mussler@zalando.de>
 Jociele Padilha <jociele.padilha@zalando.de>
 Ida Novindasari <ida.novindasari@zalando.de>
+Polina Bungina <polina.bungina@zalando.de>
Makefile
@@ -73,7 +73,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker-context
 	cd "${DOCKERDIR}" && docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERFILE}" .

 indocker-race:
-	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.17.3 bash -c "make linux"
+	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.18.9 bash -c "make linux"

 push:
 	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
@@ -85,7 +85,7 @@ mocks:
 	GO111MODULE=on go generate ./...

 tools:
-	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.22.4
+	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.23.5
 	GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
 	GO111MODULE=on go mod tidy
@@ -29,13 +29,13 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as

 ### PostgreSQL features

-* Supports PostgreSQL 14, starting from 9.6+
+* Supports PostgreSQL 15, starting from 10+
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
   [pg_basebackup](https://www.postgresql.org/docs/11/app-pgbasebackup.html) /
   [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
 * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
-  [pg_stat_statements](https://www.postgresql.org/docs/14/pgstatstatements.html),
+  [pg_stat_statements](https://www.postgresql.org/docs/15/pgstatstatements.html),
   [pgextwlist](https://github.com/dimitri/pgextwlist),
   [pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)
 * Incl. popular Postgres extensions such as
@@ -61,7 +61,7 @@ We introduce the major version into the backup path to smoothen the [major versi
 The new operator configuration can set a compatibility flag *enable_spilo_wal_path_compat* to make Spilo look for wal segments in the current path but also old format paths.
 This comes at potential performance costs and should be disabled after a few days.

-The newest Spilo image is: `registry.opensource.zalan.do/acid/spilo-14:2.1-p6`
+The newest Spilo image is: `ghcr.io/zalando/spilo-15:2.1-p9`

 The last Spilo 12 image is: `registry.opensource.zalan.do/acid/spilo-12:1.6-p5`
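The hunk above references the *enable_spilo_wal_path_compat* flag. A minimal sketch of how the flag might be set in a ConfigMap-based operator configuration; the ConfigMap name and layout are illustrative, only the key itself comes from this diff:

```yaml
# Hypothetical excerpt of the operator ConfigMap; placement is illustrative.
apiVersion: v1
kind: ConfigMap
metadata:
  name: postgres-operator
data:
  # make Spilo look for WAL segments in the current path and in old-format paths
  enable_spilo_wal_path_compat: "true"
```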
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator-ui
-version: 1.8.2
-appVersion: 1.8.2
+version: 1.9.0
+appVersion: 1.9.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:
@@ -1,9 +1,32 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
+  - apiVersion: v2
+    appVersion: 1.9.0
+    created: "2023-01-17T15:45:57.564334046+01:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.9.0.tgz
+    version: 1.9.0
   - apiVersion: v2
     appVersion: 1.8.2
-    created: "2022-06-20T11:58:48.148537324+02:00"
+    created: "2023-01-17T15:45:57.562574292+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: fbfc90fa8fd007a08a7c02e0ec9108bb8282cbb42b8c976d88f2193d6edff30c
@@ -26,7 +49,7 @@ entries:
     version: 1.8.2
   - apiVersion: v2
     appVersion: 1.8.1
-    created: "2022-06-20T11:58:48.147974157+02:00"
+    created: "2023-01-17T15:45:57.561981294+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: d26342e385ea51a0fbfbe23477999863e9489664ae803ea5c56da8897db84d24
@@ -49,7 +72,7 @@ entries:
     version: 1.8.1
   - apiVersion: v1
     appVersion: 1.8.0
-    created: "2022-06-20T11:58:48.147454782+02:00"
+    created: "2023-01-17T15:45:57.561383172+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: d4a7b40c23fd167841cc28342afdbd5ecc809181913a5c31061c83139187f148
@@ -72,7 +95,7 @@ entries:
     version: 1.8.0
   - apiVersion: v1
     appVersion: 1.7.1
-    created: "2022-06-20T11:58:48.14693682+02:00"
+    created: "2023-01-17T15:45:57.560738084+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 97aed1a1d37cd5f8441eea9522f38e56cc829786ad2134c437a5e6a15c995869
@@ -95,7 +118,7 @@ entries:
     version: 1.7.1
   - apiVersion: v1
     appVersion: 1.7.0
-    created: "2022-06-20T11:58:48.146431264+02:00"
+    created: "2023-01-17T15:45:57.560150807+01:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 37fba1968347daad393dbd1c6ee6e5b6a24d1095f972c0102197531c62dcada8
@@ -116,96 +139,4 @@ entries:
     urls:
     - postgres-operator-ui-1.7.0.tgz
     version: 1.7.0
-  - apiVersion: v1
-    appVersion: 1.6.3
-    created: "2022-06-20T11:58:48.14552248+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 08b810aa632dcc719e4785ef184e391267f7c460caa99677f2d00719075aac78
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.6.3.tgz
-    version: 1.6.3
-  - apiVersion: v1
-    appVersion: 1.6.2
-    created: "2022-06-20T11:58:48.145033254+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 14d1559bb0bd1e1e828f2daaaa6f6ac9ffc268d79824592c3589b55dd39241f6
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.6.2.tgz
-    version: 1.6.2
-  - apiVersion: v1
-    appVersion: 1.6.1
-    created: "2022-06-20T11:58:48.144518247+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 3d321352f2f1e7bb7450aa8876e3d818aa9f9da9bd4250507386f0490f2c1969
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.6.1.tgz
-    version: 1.6.1
-  - apiVersion: v1
-    appVersion: 1.6.0
-    created: "2022-06-20T11:58:48.143943237+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: 1e0aa1e7db3c1daa96927ffbf6fdbcdb434562f961833cb5241ddbe132220ee4
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.6.0.tgz
-    version: 1.6.0
-generated: "2022-06-20T11:58:48.143164875+02:00"
+generated: "2023-01-17T15:45:57.558968127+01:00"
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -19,6 +19,10 @@ spec:
       labels:
         app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
         app.kubernetes.io/instance: {{ .Release.Name }}
+      {{- with .Values.podAnnotations }}
+      annotations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
     spec:
       serviceAccountName: {{ include "postgres-operator-ui.serviceAccountName" . }}
       {{- if .Values.imagePullSecrets }}
@@ -75,7 +79,12 @@ spec:
                 "cost_throughput": 0.0476,
                 "cost_core": 0.0575,
                 "cost_memory": 0.014375,
+                "free_iops": 3000,
+                "free_throughput": 125,
+                "limit_iops": 16000,
+                "limit_throughput": 1000,
                 "postgresql_versions": [
+                  "15",
                   "14",
                   "13",
                   "12",
@@ -6,6 +6,10 @@ metadata:
     helm.sh/chart: {{ template "postgres-operator-ui.chart" . }}
     app.kubernetes.io/managed-by: {{ .Release.Service }}
    app.kubernetes.io/instance: {{ .Release.Name }}
+  {{- with .Values.service.annotations }}
+  annotations:
+    {{- toYaml . | nindent 4 }}
+  {{- end }}
   name: {{ template "postgres-operator-ui.fullname" . }}
   namespace: {{ .Release.Namespace }}
 spec:
@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator-ui
-  tag: v1.8.2
+  tag: v1.9.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -48,6 +48,10 @@ envs:
   teams:
   - "acid"

+# Extra pod annotations
+podAnnotations:
+  {}
+
 # configure extra UI ENVs
 # Extra ENVs are writen in kubenertes format and added "as is" to the pod's env variables
 # https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/
@@ -85,6 +89,8 @@ service:
   # If the type of the service is NodePort a port can be specified using the nodePort field
   # If the nodePort field is not specified, or if it has no value, then a random port is used
   # nodePort: 32521
+  annotations:
+    {}

 # configure UI ingress. If needed: "enabled: true"
 ingress:
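The two new UI chart keys above (`podAnnotations` and `service.annotations`) can be set from a Helm values override. A sketch, assuming Helm 3 and the value layout shown in this diff; the annotation keys and values are illustrative:

```yaml
# values-override.yaml (illustrative)
podAnnotations:
  prometheus.io/scrape: "true"
service:
  annotations:
    external-dns.alpha.kubernetes.io/hostname: ui.example.org
```

Applied, for example, with `helm upgrade --install postgres-operator-ui ./charts/postgres-operator-ui -f values-override.yaml`.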
@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator
-version: 1.8.2
-appVersion: 1.8.2
+version: 1.9.0
+appVersion: 1.9.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:
@@ -68,7 +68,7 @@ spec:
                 type: string
               docker_image:
                 type: string
-                default: "registry.opensource.zalan.do/acid/spilo-14:2.1-p6"
+                default: "ghcr.io/zalando/spilo-15:2.1-p9"
               enable_crd_registration:
                 type: boolean
                 default: true
@@ -88,9 +88,14 @@ spec:
               enable_spilo_wal_path_compat:
                 type: boolean
                 default: false
+              enable_team_id_clustername_prefix:
+                type: boolean
+                default: false
               etcd_host:
                 type: string
                 default: ""
+              ignore_instance_limits_annotation_key:
+                type: string
               kubernetes_use_configmaps:
                 type: boolean
                 default: false
@@ -162,10 +167,10 @@ spec:
                   type: string
                 minimal_major_version:
                   type: string
-                  default: "9.6"
+                  default: "11"
                 target_major_version:
                   type: string
-                  default: "14"
+                  default: "15"
               kubernetes:
                 type: object
                 properties:
@@ -209,6 +214,9 @@ spec:
                 enable_pod_disruption_budget:
                   type: boolean
                   default: true
+                enable_readiness_probe:
+                  type: boolean
+                  default: false
                 enable_sidecars:
                   type: boolean
                   default: true
@@ -270,6 +278,9 @@ spec:
                 pdb_name_format:
                   type: string
                   default: "postgres-{cluster}-pdb"
+                pod_antiaffinity_preferred_during_scheduling:
+                  type: boolean
+                  default: false
                 pod_antiaffinity_topology_key:
                   type: string
                   default: "kubernetes.io/hostname"
@@ -303,6 +314,9 @@ spec:
                 secret_name_template:
                   type: string
                   default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+                share_pgsocket_with_sidecars:
+                  type: boolean
+                  default: false
                 spilo_allow_privilege_escalation:
                   type: boolean
                   default: true
@@ -319,6 +333,7 @@ spec:
                   type: string
                   enum:
                   - "ebs"
+                  - "mixed"
                   - "pvc"
                   - "off"
                   default: "pvc"
@@ -347,6 +362,12 @@ spec:
                   type: string
                   pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   default: "100Mi"
+                max_cpu_request:
+                  type: string
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
+                max_memory_request:
+                  type: string
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 min_cpu_limit:
                   type: string
                   pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
@@ -411,9 +432,15 @@ spec:
                   - "Local"
                   default: "Cluster"
                 master_dns_name_format:
                   type: string
-                  default: "{cluster}.{team}.{hostedzone}"
+                  default: "{cluster}.{namespace}.{hostedzone}"
+                master_legacy_dns_name_format:
+                  type: string
+                  default: "{cluster}.{team}.{hostedzone}"
                 replica_dns_name_format:
                   type: string
-                  default: "{cluster}-repl.{team}.{hostedzone}"
+                  default: "{cluster}-repl.{namespace}.{hostedzone}"
+                replica_legacy_dns_name_format:
+                  type: string
+                  default: "{cluster}-repl.{team}.{hostedzone}"
               aws_or_gcp:
@@ -448,16 +475,38 @@ spec:
               logical_backup:
                 type: object
                 properties:
+                  logical_backup_azure_storage_account_name:
+                    type: string
+                  logical_backup_azure_storage_container:
+                    type: string
+                  logical_backup_azure_storage_account_key:
+                    type: string
+                  logical_backup_cpu_limit:
+                    type: string
+                    pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
+                  logical_backup_cpu_request:
+                    type: string
+                    pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                   logical_backup_docker_image:
                     type: string
-                    default: "registry.opensource.zalan.do/acid/logical-backup:v1.8.2"
+                    default: "registry.opensource.zalan.do/acid/logical-backup:v1.9.0"
                   logical_backup_google_application_credentials:
                     type: string
                   logical_backup_job_prefix:
                     type: string
                     default: "logical-backup-"
+                  logical_backup_memory_limit:
+                    type: string
+                    pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
+                  logical_backup_memory_request:
+                    type: string
+                    pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                   logical_backup_provider:
                     type: string
+                    enum:
+                    - "az"
+                    - "gcs"
+                    - "s3"
                     default: "s3"
                   logical_backup_s3_access_key_id:
                     type: string
@@ -588,7 +637,7 @@ spec:
                     default: "pooler"
                   connection_pooler_image:
                     type: string
-                    default: "registry.opensource.zalan.do/acid/pgbouncer:master-22"
+                    default: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
                   connection_pooler_max_db_connections:
                     type: integer
                     default: 60
@@ -618,6 +667,12 @@ spec:
                     type: string
                     pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                     default: "100Mi"
+              patroni:
+                type: object
+                properties:
+                  failsafe_mode:
+                    type: boolean
+                    default: false
       status:
         type: object
         additionalProperties:
@@ -223,6 +223,10 @@ spec:
                 items:
                   type: string
                   pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
+              masterServiceAnnotations:
+                type: object
+                additionalProperties:
+                  type: string
               nodeAffinity:
                 type: object
                 properties:
@@ -320,6 +324,8 @@ spec:
               patroni:
                 type: object
                 properties:
+                  failsafe_mode:
+                    type: boolean
                   initdb:
                     type: object
                     additionalProperties:
@@ -365,13 +371,12 @@ spec:
               version:
                 type: string
                 enum:
-                - "9.5"
-                - "9.6"
                 - "10"
                 - "11"
                 - "12"
                 - "13"
                 - "14"
+                - "15"
               parameters:
                 type: object
                 additionalProperties:
@@ -401,6 +406,10 @@ spec:
               replicaLoadBalancer:
                 type: boolean
                 description: deprecated
+              replicaServiceAnnotations:
+                type: object
+                additionalProperties:
+                  type: string
               resources:
                 type: object
                 properties:
@@ -620,7 +629,7 @@ spec:
                         operator:
                           type: string
                           enum:
-                          - DoesNotExists
+                          - DoesNotExist
                           - Exists
                           - In
                           - NotIn
@@ -1,9 +1,31 @@
 apiVersion: v1
 entries:
   postgres-operator:
+  - apiVersion: v2
+    appVersion: 1.9.0
+    created: "2023-01-17T15:33:03.869287885+01:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.9.0.tgz
+    version: 1.9.0
   - apiVersion: v2
     appVersion: 1.8.2
-    created: "2022-06-20T11:57:53.031245647+02:00"
+    created: "2023-01-17T15:33:03.86746187+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: f77ffad2e98b72a621e5527015cf607935d3ed688f10ba4b626435acb9631b5b
@@ -25,7 +47,7 @@ entries:
     version: 1.8.2
   - apiVersion: v2
     appVersion: 1.8.1
-    created: "2022-06-20T11:57:53.029722276+02:00"
+    created: "2023-01-17T15:33:03.865880826+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: ee0c3bb6ba72fa4289ba3b1c6060e5b312dd023faba2a61b4cb7d9e5e2cc57a5
@@ -47,7 +69,7 @@ entries:
     version: 1.8.1
   - apiVersion: v1
     appVersion: 1.8.0
-    created: "2022-06-20T11:57:53.028188865+02:00"
+    created: "2023-01-17T15:33:03.8643608+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 3ae232cf009e09aa2ad11c171484cd2f1b72e63c59735e58fbe2b6eb842f4c86
@@ -69,7 +91,7 @@ entries:
     version: 1.8.0
   - apiVersion: v1
     appVersion: 1.7.1
-    created: "2022-06-20T11:57:53.026647776+02:00"
+    created: "2023-01-17T15:33:03.862914146+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 7262563bec0b058e669ae6bcff0226e33fa9ece9c41ac46a53274046afe7700c
@@ -91,7 +113,7 @@ entries:
     version: 1.7.1
   - apiVersion: v1
     appVersion: 1.7.0
-    created: "2022-06-20T11:57:53.02514275+02:00"
+    created: "2023-01-17T15:33:03.861539439+01:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: c3e99fb94305f81484b8b1af18eefb78681f3b5d057d5ad10565e4afb7c65ffe
@@ -111,92 +133,4 @@ entries:
     urls:
     - postgres-operator-1.7.0.tgz
     version: 1.7.0
-  - apiVersion: v1
-    appVersion: 1.6.3
-    created: "2022-06-20T11:57:53.022692764+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: ea08f991bf23c9ad114bca98ebcbe3e2fa15beab163061399394905eaee89b35
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.6.3.tgz
-    version: 1.6.3
-  - apiVersion: v1
-    appVersion: 1.6.2
-    created: "2022-06-20T11:57:53.021045272+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: d886f8a0879ca07d1e5246ee7bc55710e1c872f3977280fe495db6fc2057a7f4
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.6.2.tgz
-    version: 1.6.2
-  - apiVersion: v1
-    appVersion: 1.6.1
-    created: "2022-06-20T11:57:53.019428631+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: 4ba5972cd486dcaa2d11c5613a6f97f6b7b831822e610fe9e10a57ea1db23556
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.6.1.tgz
-    version: 1.6.1
-  - apiVersion: v1
-    appVersion: 1.6.0
-    created: "2022-06-20T11:57:53.017863057+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: f52149718ea364f46b4b9eec9a65f6253ad182bb78df541d14cd5277b9c8a8c3
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.6.0.tgz
-    version: 1.6.0
-generated: "2022-06-20T11:57:53.016179465+02:00"
+generated: "2023-01-17T15:33:03.859917247+01:00"
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -57,6 +57,14 @@ spec:
 {{ toYaml .Values.resources | indent 10 }}
         securityContext:
 {{ toYaml .Values.securityContext | indent 10 }}
+        {{- if .Values.readinessProbe }}
+        readinessProbe:
+          httpGet:
+            path: /readyz
+            port: {{ .Values.configLoggingRestApi.api_port }}
+          initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+          periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+        {{- end }}
       {{- if .Values.imagePullSecrets }}
       imagePullSecrets:
 {{ toYaml .Values.imagePullSecrets | indent 8 }}
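The template above only renders the probe when `.Values.readinessProbe` is set. A sketch of matching values, mirroring the defaults this diff adds to the chart's values.yaml further down; the `api_port` value is an assumption, not taken from this diff:

```yaml
# values excerpt (shape follows the template above)
readinessProbe:
  initialDelaySeconds: 5
  periodSeconds: 10
configLoggingRestApi:
  api_port: 8080   # assumed port; must match the operator's REST API port
```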
@@ -10,9 +10,9 @@ metadata:
     app.kubernetes.io/managed-by: {{ .Release.Service }}
     app.kubernetes.io/instance: {{ .Release.Name }}
 configuration:
-{{ toYaml .Values.configGeneral | indent 2 }}
+{{ tpl (toYaml .Values.configGeneral) . | indent 2 }}
   users:
-{{ toYaml .Values.configUsers | indent 4 }}
+{{ tpl (toYaml .Values.configUsers) . | indent 4 }}
   major_version_upgrade:
 {{ toYaml .Values.configMajorVersionUpgrade | indent 4 }}
   kubernetes:
@@ -21,7 +21,7 @@ configuration:
   {{- end }}
     pod_service_account_name: {{ include "postgres-pod.serviceAccountName" . }}
     oauth_token_secret_name: {{ template "postgres-operator.fullname" . }}
-{{ toYaml .Values.configKubernetes | indent 4 }}
+{{ tpl (toYaml .Values.configKubernetes) . | indent 4 }}
   postgres_pod_resources:
 {{ toYaml .Values.configPostgresPodResources | indent 4 }}
   timeouts:
@@ -35,7 +35,7 @@ configuration:
   debug:
 {{ toYaml .Values.configDebug | indent 4 }}
   teams_api:
-{{ toYaml .Values.configTeamsApi | indent 4 }}
+{{ tpl (toYaml .Values.configTeamsApi) . | indent 4 }}
   logging_rest_api:
 {{ toYaml .Values.configLoggingRestApi | indent 4 }}
   connection_pooler:
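Because these sections are now rendered through Helm's `tpl` function, string values in the chart's values file may themselves contain template expressions. A sketch under that assumption; the key and value shown are illustrative, not part of this diff:

```yaml
# values override (illustrative); the {{ .Release.Namespace }} expression is
# expanded because configKubernetes passes through tpl in the template above
configKubernetes:
  pod_environment_configmap: "{{ .Release.Namespace }}/pod-env-overrides"
```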
@@ -1,7 +1,7 @@
 image:
   registry: registry.opensource.zalan.do
   repository: acid/postgres-operator
-  tag: v1.8.2
+  tag: v1.9.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -33,12 +33,19 @@ configGeneral:
   enable_shm_volume: true
   # enables backwards compatible path between Spilo 12 and Spilo 13+ images
   enable_spilo_wal_path_compat: false
+  # operator will sync only clusters where name starts with teamId prefix
+  enable_team_id_clustername_prefix: false
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
+  # Spilo docker image
+  docker_image: ghcr.io/zalando/spilo-15:2.1-p9
+
+  # key name for annotation to ignore globally configured instance limits
+  # ignore_instance_limits_annotation_key: ""
+
   # Select if setup uses endpoints (default), or configmaps to manage leader (DCS=k8s)
   # kubernetes_use_configmaps: false
-  # Spilo docker image
-  docker_image: registry.opensource.zalan.do/acid/spilo-14:2.1-p6

   # min number of instances in Postgres cluster. -1 = no limit
   min_instances: -1
   # max number of instances in Postgres cluster. -1 = no limit
@@ -82,9 +89,9 @@ configMajorVersionUpgrade:
   # - acid

   # minimal Postgres major version that will not automatically be upgraded
-  minimal_major_version: "9.6"
+  minimal_major_version: "11"
   # target Postgres major version when upgrading clusters automatically
-  target_major_version: "14"
+  target_major_version: "15"

 configKubernetes:
   # list of additional capabilities for postgres container
@@ -122,6 +129,8 @@ configKubernetes:
   enable_pod_antiaffinity: false
   # toggles PDB to set to MinAvailabe 0 or 1
   enable_pod_disruption_budget: true
+  # toogles readiness probe for database pods
+  enable_readiness_probe: false
   # enables sidecar containers to run alongside Spilo in the same pod
   enable_sidecars: true
@@ -156,6 +165,8 @@ configKubernetes:

   # defines the template for PDB (Pod Disruption Budget) names
   pdb_name_format: "postgres-{cluster}-pdb"
+  # switches pod anti affinity type to `preferredDuringSchedulingIgnoredDuringExecution`
+  pod_antiaffinity_preferred_during_scheduling: false
   # override topology key for pod anti affinity
   pod_antiaffinity_topology_key: "kubernetes.io/hostname"
   # namespaced name of the ConfigMap with environment variables to populate on every pod
@@ -180,9 +191,12 @@ configKubernetes:
   # if the user is in different namespace than cluster and cross namespace secrets
   # are enabled via `enable_cross_namespace_secret` flag in the configuration.
   secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
+  # sharing unix socket of PostgreSQL (`pg_socket`) with the sidecars
+  share_pgsocket_with_sidecars: false
   # set user and group for the spilo container (required to run Spilo as non-root process)
   # spilo_runasuser: 101
   # spilo_runasgroup: 103
+
   # group ID with write-access to volumes (required to run Spilo as non-root process)
   # spilo_fsgroup: 103
@@ -191,7 +205,7 @@ configKubernetes:
   # whether the Spilo container should run with additional permissions other than parent.
   # required by cron which needs setuid
   spilo_allow_privilege_escalation: true
-  # storage resize strategy, available options are: ebs, pvc, off
+  # storage resize strategy, available options are: ebs, pvc, off or mixed
   storage_resize_mode: pvc
   # pod toleration assigned to instances of every Postgres cluster
   # toleration:
@@ -212,6 +226,12 @@ configPostgresPodResources:
   default_memory_limit: 500Mi
   # memory request value for the postgres containers
   default_memory_request: 100Mi
+  # optional upper boundary for CPU request
+  # max_cpu_request: "1"
+
+  # optional upper boundary for memory request
+  # max_memory_request: 4Gi
+
   # hard CPU minimum required to properly run a Postgres cluster
   min_cpu_limit: 250m
   # hard memory minimum required to properly run a Postgres cluster
@@ -256,9 +276,13 @@ configLoadBalancer:
   # define external traffic policy for the load balancer
   external_traffic_policy: "Cluster"
   # defines the DNS name string template for the master load balancer cluster
-  master_dns_name_format: "{cluster}.{team}.{hostedzone}"
+  master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
+  # deprecated DNS template for master load balancer using team name
+  master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}"
   # defines the DNS name string template for the replica load balancer cluster
-  replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
+  replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}"
+  # deprecated DNS template for replica load balancer using team name
+  replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"

 # options to aid debugging of the operator itself
 configDebug:
@@ -284,7 +308,7 @@ configAwsOrGcp:
   # Path to mount the above Secret in the filesystem of the container(s)
   # additional_secret_mount_path: "/some/dir"

-  # AWS region used to store ESB volumes
+  # AWS region used to store EBS volumes
   aws_region: eu-central-1

   # enable automatic migration on AWS from gp2 to gp3 volumes
@@ -312,6 +336,17 @@ configAwsOrGcp:

 # configure K8s cron job managed by the operator
 configLogicalBackup:
+  # Azure Storage Account specs to store backup results
+  # logical_backup_azure_storage_account_name: ""
+  # logical_backup_azure_storage_container: ""
+  # logical_backup_azure_storage_account_key: ""
+
+  # resources for logical backup pod, if empty configPostgresPodResources will be used
+  # logical_backup_cpu_limit: ""
+  # logical_backup_cpu_request: ""
+  # logical_backup_memory_limit: ""
+  # logical_backup_memory_request: ""
+
   # image for pods of the logical backup job (example runs pg_dumpall)
   logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.8.0"
   # path of google cloud service account json file
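With the Azure options added above, a minimal sketch of a values override that switches the logical backup job to the new "az" provider; account name and container are placeholders, and the account key would normally come from secret management rather than a values file:

```yaml
configLogicalBackup:
  logical_backup_provider: "az"
  logical_backup_azure_storage_account_name: "mybackupaccount"  # placeholder
  logical_backup_azure_storage_container: "postgres-backups"    # placeholder
  logical_backup_azure_storage_account_key: ""                  # inject via a Secret instead
```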
@@ -319,7 +354,7 @@ configLogicalBackup:

   # prefix for the backup job name
   logical_backup_job_prefix: "logical-backup-"
-  # storage provider - either "s3" or "gcs"
+  # storage provider - either "s3", "gcs" or "az"
   logical_backup_provider: "s3"
   # S3 Access Key ID
   logical_backup_s3_access_key_id: ""
@@ -381,7 +416,7 @@ configConnectionPooler:
   # db user for pooler to use
   connection_pooler_user: "pooler"
   # docker image
-  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-22"
+  connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
   # max db connections the pooler should hold
   connection_pooler_max_db_connections: 60
   # default pooling mode
@@ -394,6 +429,10 @@ configConnectionPooler:
   connection_pooler_default_cpu_limit: "1"
   connection_pooler_default_memory_limit: 100Mi

+configPatroni:
+  # enable Patroni DCS failsafe_mode feature
+  failsafe_mode: false
+
 # Zalando's internal CDC stream feature
 enableStreams: false
@@ -435,6 +474,11 @@ securityContext:
   readOnlyRootFilesystem: true
   allowPrivilegeEscalation: false

+# Allow to setup operator Deployment readiness probe
+readinessProbe:
+  initialDelaySeconds: 5
+  periodSeconds: 10
+
 # Affinity for pod assignment
 # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
 affinity: {}
@@ -16,7 +16,7 @@ pipeline:
       - desc: 'Install go'
         cmd: |
           cd /tmp
-          wget -q https://storage.googleapis.com/golang/go1.17.4.linux-amd64.tar.gz -O go.tar.gz
+          wget -q https://storage.googleapis.com/golang/go1.18.9.linux-amd64.tar.gz -O go.tar.gz
           tar -xf go.tar.gz
           mv go /usr/local
           ln -s /usr/local/go/bin/go /usr/bin/go
@@ -1,4 +1,4 @@
-FROM registry.opensource.zalan.do/library/alpine-3.12:latest
+FROM registry.opensource.zalan.do/library/alpine-3.15:latest
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

 # We need root certificates to deal with teams api over https
@@ -1,4 +1,4 @@
-FROM registry.opensource.zalan.do/library/alpine-3.12:latest
+FROM registry.opensource.zalan.do/library/alpine-3.15:latest
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

 # We need root certificates to deal with teams api over https
@@ -15,6 +15,7 @@ RUN apt-get update \
                gnupg \
                gcc \
+               libffi-dev \
     && curl -sL https://aka.ms/InstallAzureCLIDeb | bash \
     && pip3 install --upgrade pip \
     && pip3 install --no-cache-dir awscli --upgrade \
     && pip3 install --no-cache-dir gsutil --upgrade \
@@ -23,13 +24,12 @@ RUN apt-get update \
     && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
     && apt-get update \
     && apt-get install --no-install-recommends -y \
+       postgresql-client-15 \
        postgresql-client-14 \
        postgresql-client-13 \
        postgresql-client-12 \
        postgresql-client-11 \
        postgresql-client-10 \
        postgresql-client-9.6 \
-       postgresql-client-9.5 \
     && apt-get clean \
     && rm -rf /var/lib/apt/lists/*
@@ -40,6 +40,12 @@ function compress {
     pigz
 }

+function az_upload {
+    PATH_TO_BACKUP=$LOGICAL_BACKUP_S3_BUCKET"/spilo/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
+
+    az storage blob upload --file "$1" --account-name "$LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME" --account-key "$LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY" -c "$LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER" -n "$PATH_TO_BACKUP"
+}
+
 function aws_delete_objects {
     args=(
         "--bucket=$LOGICAL_BACKUP_S3_BUCKET"
@@ -120,7 +126,7 @@ function upload {
     "gcs")
         gcs_upload
         ;;
-    *)
+    "s3")
         aws_upload $(($(estimate_size) / DUMP_SIZE_COEFF))
         aws_delete_outdated
         ;;
@@ -174,8 +180,13 @@ for search in "${search_strategy[@]}"; do
 done

 set -x
+if [ "$LOGICAL_BACKUP_PROVIDER" == "az" ]; then
+    dump | compress > /tmp/azure-backup.sql.gz
+    az_upload /tmp/azure-backup.sql.gz
+else
 dump | compress | upload
 [[ ${PIPESTATUS[0]} != 0 || ${PIPESTATUS[1]} != 0 || ${PIPESTATUS[2]} != 0 ]] && (( ERRORCOUNT += 1 ))
 set +x

 exit $ERRORCOUNT
+fi
@@ -516,6 +516,9 @@ configuration:
   enable_pod_antiaffinity: true
 ```

+By default the type of pod anti affinity is `requiredDuringSchedulingIgnoredDuringExecution`,
+you can switch to `preferredDuringSchedulingIgnoredDuringExecution` by setting `pod_antiaffinity_preferred_during_scheduling: true`.
+
 By default the topology key for the pod anti affinity is set to
 `kubernetes.io/hostname`, you can set another topology key e.g.
 `failure-domain.beta.kubernetes.io/zone`. See [built-in node labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels) for available topology keys.
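A sketch of the operator configuration fragment the paragraphs above describe, combining only keys that appear elsewhere in this diff; the zone topology key is the example given in the docs:

```yaml
configuration:
  kubernetes:
    enable_pod_antiaffinity: true
    # prefer rather than require spreading pods across nodes/zones
    pod_antiaffinity_preferred_during_scheduling: true
    pod_antiaffinity_topology_key: "failure-domain.beta.kubernetes.io/zone"
```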
@@ -628,9 +631,9 @@ order (e.g. a variable defined in 4. overrides a variable with the same name
 in 5.):

 1. Assigned by the operator
-2. Clone section (with WAL settings from operator config when `s3_wal_path` is empty)
-3. Standby section
-4. `env` section in cluster manifest
+2. `env` section in cluster manifest
+3. Clone section (with WAL settings from operator config when `s3_wal_path` is empty)
+4. Standby section
 5. Pod environment secret via operator config
 6. Pod environment config map via operator config
 7. WAL and logical backup settings from operator config
@@ -781,9 +784,15 @@ services:
   This value can't be overwritten. If any changing in its value is needed, it
   MUST be done changing the DNS format operator config parameters; and
 - `service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout` with
-  a default value of "3600". This value can be overwritten with the operator
-  config parameter `custom_service_annotations` or the cluster parameter
-  `serviceAnnotations`.
+  a default value of "3600".
+
+There are multiple options to specify service annotations that will be merged
+with each other and override in the following order (where latter take
+precedence):
+1. Default annotations if LoadBalancer is enabled
+2. Globally configured `custom_service_annotations`
+3. `serviceAnnotations` specified in the cluster manifest
+4. `masterServiceAnnotations` and `replicaServiceAnnotations` specified in the cluster manifest

 To limit the range of IP addresses that can reach a load balancer, specify the
 desired ranges in the `allowedSourceRanges` field (applies to both master and
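A sketch of how the annotation layers above might combine in a cluster manifest; names and values are illustrative:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster
spec:
  teamId: "acid"
  enableMasterLoadBalancer: true
  serviceAnnotations:
    foo: bar                 # applies to both master and replica services
  masterServiceAnnotations:
    foo: master-override     # wins over serviceAnnotations for the master service
```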
@@ -801,6 +810,9 @@ Load balancer services can also be enabled for the [connection pooler](user.md#c
 pods with manifest flags `enableMasterPoolerLoadBalancer` and/or
 `enableReplicaPoolerLoadBalancer` or in the operator configuration with
 `enable_master_pooler_load_balancer` and/or `enable_replica_pooler_load_balancer`.
+For the `external-dns.alpha.kubernetes.io/hostname` annotation the `-pooler`
+suffix will be appended to the cluster name used in the template which is
+defined in `master|replica_dns_name_format`.

 ## Running periodic 'autorepair' scans of K8s objects
@@ -1099,9 +1111,10 @@ and `pod-env-overrides` resources applied to your cluster, ensure that the opera
 is set up like the following:
 ```yml
 ...
-aws_or_gcp:
+kubernetes:
   pod_environment_secret: "psql-backup-creds"
   pod_environment_configmap: "postgres-operator-system/pod-env-overrides"
+aws_or_gcp:
   wal_az_storage_account: "postgresbackupsbucket28302F2" # name of storage account to save the WAL-G logs
 ...
 ```
@@ -1110,7 +1123,7 @@ aws_or_gcp:

 If cluster members have to be (re)initialized restoring physical backups
 happens automatically either from the backup location or by running
-[pg_basebackup](https://www.postgresql.org/docs/13/app-pgbasebackup.html)
+[pg_basebackup](https://www.postgresql.org/docs/15/app-pgbasebackup.html)
 on one of the other running instances (preferably replicas if they do not lag
 behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
 clusters.
@@ -1197,6 +1210,10 @@ of the backup cron job.
 `cronjobs` resource from the `batch` API group for the operator service account.
 See [example RBAC](https://github.com/zalando/postgres-operator/blob/master/manifests/operator-service-account-rbac.yaml)

+7. Resources of the pod template in the cron job can be configured. When left
+   empty [default values of spilo pods](reference/operator_parameters.md#kubernetes-resource-requests)
+   will be used.
+
 ## Sidecars for Postgres clusters

 A list of sidecars is added to each cluster created by the operator. The default
@@ -91,7 +91,7 @@ Please, report any issues discovered to https://github.com/zalando/postgres-oper

 - "Getting started with the Zalando Operator for PostgreSQL" by Daniel Westermann on [dbi services blog](https://blog.dbi-services.com/getting-started-with-the-zalando-operator-for-postgresql/), Mar. 2021.

-- "Our experience with Postgres Operator for Kubernetes by Zalando" by Nikolay Bogdanov on [flant blog](https://blog.flant.com/our-experience-with-postgres-operator-for-kubernetes-by-zalando/), Feb. 2021.
+- "Our experience with Postgres Operator for Kubernetes by Zalando" by Nikolay Bogdanov on [Palark blog](https://blog.palark.com/our-experience-with-postgres-operator-for-kubernetes-by-zalando/), Feb. 2021.

 - "How to set up continuous backups and monitoring" by Pål Kristensen on [GitHub](https://github.com/zalando/postgres-operator/issues/858#issuecomment-608136253), Mar. 2020.
@@ -53,8 +53,7 @@ Those parameters are grouped under the `metadata` top-level key.
 These parameters are grouped directly under the `spec` key in the manifest.

 * **teamId**
-  name of the team the cluster belongs to. Changing it after the cluster
-  creation is not supported. Required field.
+  name of the team the cluster belongs to. Required field.

 * **numberOfInstances**
   total number of instances for a given cluster. The operator parameters
@@ -174,6 +173,22 @@ These parameters are grouped directly under the `spec` key in the manifest.
   [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges)
   for more information regarding default values and overwrite rules.

+* **masterServiceAnnotations**
+  A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+  to the master service created for the database cluster. Check the
+  [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges)
+  for more information regarding default values and overwrite rules.
+  This field overrides `serviceAnnotations` with the same key for the master
+  service if not empty.
+
+* **replicaServiceAnnotations**
+  A map of key value pairs that gets attached as [annotations](https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/)
+  to the replica service created for the database cluster. Check the
+  [administrator docs](https://github.com/zalando/postgres-operator/blob/master/docs/administrator.md#load-balancers-and-allowed-ip-ranges)
+  for more information regarding default values and overwrite rules.
+  This field overrides `serviceAnnotations` with the same key for the replica
+  service if not empty.
+
 * **enableShmVolume**
   Start a database pod without limitations on shm memory. By default Docker
   limit `/dev/shm` to `64M` (see e.g. the [docker
@@ -269,7 +284,7 @@ documentation](https://patroni.readthedocs.io/en/latest/SETTINGS.html) for the
 explanation of `ttl` and `loop_wait` parameters.

 * **initdb**
-  a map of key-value pairs describing initdb parameters. For `data-checksum`,
+  a map of key-value pairs describing initdb parameters. For `data-checksums`,
   `debug`, `no-locale`, `noclean`, `nosync` and `sync-only` parameters use
   `true` as the value if you want to set them. Changes to this option do not
   affect the already initialized clusters. Optional.
@@ -318,6 +333,9 @@ explanation of `ttl` and `loop_wait` parameters.
 * **synchronous_node_count**
   Patroni `synchronous_node_count` parameter value. Note, this option is only available for Spilo images with Patroni 2.0+. The default is set to `1`. Optional.

+* **failsafe_mode**
+  Patroni `failsafe_mode` parameter value. If enabled, allows Patroni to cope with DCS outages and avoid leader demotion. Note, this option is currently not included in any Patroni release. The default is set to `false`. Optional.
+
 ## Postgres container resources

 Those parameters define [CPU and memory requests and limits](https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/)
@@ -556,7 +574,7 @@ Those parameters are grouped under the `tls` top-level key.
 ## Change data capture streams

 This sections enables change data capture (CDC) streams via Postgres'
-[logical decoding](https://www.postgresql.org/docs/14/logicaldecoding.html)
+[logical decoding](https://www.postgresql.org/docs/15/logicaldecoding.html)
 feature and `pgoutput` plugin. While the Postgres operator takes responsibility
 for providing the setup to publish change events, it relies on external tools
 to consume them. At Zalando, we are using a workflow based on
@@ -588,7 +606,7 @@ can have the following properties:
   and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
   The application is responsible for putting events into a (JSON/B or VARCHAR)
   payload column of the outbox table in the structure of the specified target
-  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/14/logical-replication-publication.html)
+  event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/15/logical-replication-publication.html)
   in Postgres for all tables specified for one `database` and `applicationId`.
   The CDC operator will consume from it shortly after transactions are
   committed to the outbox table. The `idColumn` will be used in telemetry for
@@ -28,6 +28,7 @@ configuration.
 and change it.

+To test the CRD-based configuration locally, use the following

 ```bash
 kubectl create -f manifests/operatorconfiguration.crd.yaml # registers the CRD
 kubectl create -f manifests/postgresql-operator-default-configuration.yaml
@@ -92,6 +93,11 @@ Those are top-level keys, containing both leaf keys and groups.
 * **enable_spilo_wal_path_compat**
   enables backwards compatible path between Spilo 12 and Spilo 13+ images. The default is `false`.

+* **enable_team_id_clustername_prefix**
+  To lower the risk of name clashes between clusters of different teams you
+  can turn on this flag and the operator will sync only clusters where the
+  name starts with the `teamId` (from `spec`) plus `-`. Default is `false`.
+
 * **etcd_host**
   Etcd connection string for Patroni defined as `host:port`. Not required when
   Patroni native Kubernetes support is used. The default is empty (use
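Assuming the usual manifest layout, the prefix rule described above might look like this; cluster and team names are illustrative:

```yaml
# With enable_team_id_clustername_prefix: true the operator only syncs
# clusters whose name starts with "<teamId>-" (illustrative example):
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster   # synced: starts with teamId "acid" plus "-"
spec:
  teamId: "acid"
```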
@@ -147,6 +153,12 @@ Those are top-level keys, containing both leaf keys and groups.
   When `-1` is specified for `min_instances`, no limits are applied. The default
   is `-1`.

+* **ignore_instance_limits_annotation_key**
+  for some clusters it might be required to scale beyond the limits that can be
+  configured with `min_instances` and `max_instances` options. You can define
+  an annotation key that can be used as a toggle in cluster manifests to ignore
+  globally configured instance limits. The default is empty.
+
 * **resync_period**
   period between consecutive sync requests. The default is `30m`.
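A sketch pairing the operator option above with a per-cluster toggle; the annotation key is administrator-defined, and both the key and the `"true"` value shown here are assumptions for illustration:

```yaml
# operator configuration (key name chosen by the administrator)
ignore_instance_limits_annotation_key: "zalando.org/ignore-instance-limits"
---
# cluster manifest excerpt using that annotation as the toggle
metadata:
  annotations:
    zalando.org/ignore-instance-limits: "true"   # assumed toggle value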
@@ -155,10 +167,11 @@ Those are top-level keys, containing both leaf keys and groups.

 * **set_memory_request_to_limit**
   Set `memory_request` to `memory_limit` for all Postgres clusters (the default
-  value is also increased). This prevents certain cases of memory overcommitment
-  at the cost of overprovisioning memory and potential scheduling problems for
-  containers with high memory limits due to the lack of memory on Kubernetes
-  cluster nodes. This affects all containers created by the operator (Postgres,
+  value is also increased but configured `max_memory_request` can not be
+  bypassed). This prevents certain cases of memory overcommitment at the cost
+  of overprovisioning memory and potential scheduling problems for containers
+  with high memory limits due to the lack of memory on Kubernetes cluster
+  nodes. This affects all containers created by the operator (Postgres,
   connection pooler, logical backup, scalyr sidecar, and other sidecars except
   **sidecars** defined in the operator configuration); to set resources for the
   operator's own container, change the [operator deployment manually](https://github.com/zalando/postgres-operator/blob/master/manifests/postgres-operator.yaml#L20).
@@ -233,12 +246,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.

 * **minimal_major_version**
   The minimal Postgres major version that will not automatically be upgraded
-  when `major_version_upgrade_mode` is set to `"full"`. The default is `"9.6"`.
+  when `major_version_upgrade_mode` is set to `"full"`. The default is `"11"`.

 * **target_major_version**
   The target Postgres major version when upgrading clusters automatically
   which violate the configured allowed `minimal_major_version` when
-  `major_version_upgrade_mode` is set to `"full"`. The default is `"14"`.
+  `major_version_upgrade_mode` is set to `"full"`. The default is `"15"`.

 ## Kubernetes resources
@@ -331,6 +344,12 @@ configuration they are grouped under the `kubernetes` key.
   to run alongside Spilo on the same pod. Globally defined sidecars are always
   enabled. Default is true.

+* **share_pgsocket_with_sidecars**
+  global option to create an emptyDir volume named `postgresql-run`. This is
+  mounted by all containers at `/var/run/postgresql` sharing the unix socket of
+  PostgreSQL (`pg_socket`) with the sidecars this way.
+  Default is `false`.
+
 * **secret_name_template**
   a template for the name of the database user secrets generated by the
   operator. `{namespace}` is replaced with name of the namespace if
@@ -477,6 +496,14 @@ configuration they are grouped under the `kubernetes` key.
   of stateful sets of PG clusters. The default is `ordered_ready`, the second
   possible value is `parallel`.

+* **enable_readiness_probe**
+  the operator can set a readiness probe on the statefulset for the database
+  pods with `InitialDelaySeconds: 6`, `PeriodSeconds: 10`, `TimeoutSeconds: 5`,
+  `SuccessThreshold: 1` and `FailureThreshold: 3`. When enabling readiness
+  probes it is recommended to switch the `pod_management_policy` to `parallel`
+  to avoid unneccesary waiting times in case of multiple instances failing.
+  The default is `false`.
+
 * **storage_resize_mode**
   defines how operator handles the difference between the requested volume size and
   the actual size. Available options are:
@@ -508,6 +535,12 @@ CRD-based configuration.
   memory limits for the Postgres containers, unless overridden by cluster-specific
   settings. The default is `500Mi`.

+* **max_cpu_request**
+  optional upper boundary for CPU request
+
+* **max_memory_request**
+  optional upper boundary for memory request
+
 * **min_cpu_limit**
   hard CPU minimum what we consider to be required to properly run Postgres
   clusters with Patroni on Kubernetes. The default is `250m`.
@ -594,22 +627,47 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.

   the cluster. Can be overridden by individual cluster settings. The default
   is `false`.

-* **external_traffic_policy** defines external traffic policy for load
+* **external_traffic_policy**
+  defines the external traffic policy for load
   balancers. Allowed values are `Cluster` (default) and `Local`.

-* **master_dns_name_format** defines the DNS name string template for the
-  master load balancer cluster. The default is
-  `{cluster}.{team}.{hostedzone}`, where `{cluster}` is replaced by the cluster
-  name, `{team}` is replaced with the team name and `{hostedzone}` is replaced
-  with the hosted zone (the value of the `db_hosted_zone` parameter). No other
-  placeholders are allowed.
+* **master_dns_name_format**
+  defines the DNS name string template for the master load balancer cluster.
+  The default is `{cluster}.{namespace}.{hostedzone}`, where `{cluster}` is
+  replaced by the cluster name, `{namespace}` is replaced with the namespace
+  and `{hostedzone}` is replaced with the hosted zone (the value of the
+  `db_hosted_zone` parameter). The `{team}` placeholder can still be used,
+  although it is not recommended because the team of a cluster can change.
+  If the cluster name starts with the `teamId`, it will be part of the DNS
+  name anyway. No other placeholders are allowed!

-* **replica_dns_name_format** defines the DNS name string template for the
-  replica load balancer cluster. The default is
-  `{cluster}-repl.{team}.{hostedzone}`, where `{cluster}` is replaced by the
-  cluster name, `{team}` is replaced with the team name and `{hostedzone}` is
-  replaced with the hosted zone (the value of the `db_hosted_zone` parameter).
-  No other placeholders are allowed.
+* **master_legacy_dns_name_format**
+  *deprecated* default master DNS template `{cluster}.{team}.{hostedzone}` as
+  of pre `v1.9.0`. If the cluster name starts with the `teamId` then a second
+  DNS entry will be created using the template defined here to provide
+  backwards compatibility. The `teamId` prefix will be extracted from the
+  cluster name because it follows later in the DNS string. When using a
+  customized `master_dns_name_format` make sure to define the legacy DNS
+  format when switching to v1.9.0.
+
+* **replica_dns_name_format**
+  defines the DNS name string template for the replica load balancer cluster.
+  The default is `{cluster}-repl.{namespace}.{hostedzone}`, where `{cluster}`
+  is replaced by the cluster name, `{namespace}` is replaced with the
+  namespace and `{hostedzone}` is replaced with the hosted zone (the value of
+  the `db_hosted_zone` parameter). The `{team}` placeholder can still be used,
+  although it is not recommended because the team of a cluster can change.
+  If the cluster name starts with the `teamId`, it will be part of the DNS
+  name anyway. No other placeholders are allowed!
+
+* **replica_legacy_dns_name_format**
+  *deprecated* default replica DNS template `{cluster}-repl.{team}.{hostedzone}`
+  as of pre `v1.9.0`. If the cluster name starts with the `teamId` then a
+  second DNS entry will be created using the template defined here to provide
+  backwards compatibility. The `teamId` prefix will be extracted from the
+  cluster name because it follows later in the DNS string. When using a
+  customized `replica_dns_name_format` make sure to define the legacy DNS
+  format when switching to v1.9.0.
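Not part of the diff, but to make the migration path concrete: an upgraded configuration that keeps pre-v1.9.0 DNS names resolvable could combine the new defaults with the legacy templates. The hosted zone is assumed.

```yaml
# hypothetical excerpt of the load_balancer configuration group
configuration:
  load_balancer:
    db_hosted_zone: db.example.com
    master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
    master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}"
    replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}"
    replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
```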

 ## AWS or GCP interaction
@ -685,12 +743,19 @@ These parameters configure a K8s cron job managed by the operator to produce

 Postgres logical backups. In the CRD-based configuration those parameters are
 grouped under the `logical_backup` key.

+* **logical_backup_cpu_limit**
+  **logical_backup_cpu_request**
+  **logical_backup_memory_limit**
+  **logical_backup_memory_request**
+  Resource configuration for the pod template in the logical backup cron job.
+  If empty, default values from `postgres_pod_resources` will be used.

 * **logical_backup_docker_image**
   An image for pods of the logical backup job. The [example image](https://github.com/zalando/postgres-operator/blob/master/docker/logical-backup/Dockerfile)
   runs `pg_dumpall` on a replica if possible and uploads compressed results to
   an S3 bucket under the key `/spilo/pg_cluster_name/cluster_k8s_uuid/logical_backups`.
   The default image is the same image built with the Zalando-internal CI
-  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.8.2"
+  pipeline. Default: "registry.opensource.zalan.do/acid/logical-backup:v1.9.0"

 * **logical_backup_google_application_credentials**
   Specifies the path of the google cloud service account json file. Default is empty.
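For illustration, values assumed: the new resource knobs would be set alongside the image in the same group.

```yaml
# hypothetical excerpt of the logical_backup configuration group
configuration:
  logical_backup:
    logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.9.0"
    logical_backup_cpu_request: 100m
    logical_backup_memory_request: 100Mi
    logical_backup_cpu_limit: "1"
    logical_backup_memory_limit: 500Mi
```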
@ -699,8 +764,17 @@ grouped under the `logical_backup` key.

   The prefix to be prepended to the name of a k8s CronJob running the backups. Beware the prefix counts towards the name length restrictions imposed by k8s. Empty string is a legitimate value. The operator does not do the actual renaming: it simply creates the job with the new prefix. You will have to delete the old cron job manually. Default: "logical-backup-".

 * **logical_backup_provider**
-  Specifies the storage provider to which the backup should be uploaded
-  (`s3` or `gcs`). Default: "s3"
+  Specifies the storage provider to which the backup should be uploaded
+  (`s3`, `gcs` or `az`). Default: "s3"
+
+* **logical_backup_azure_storage_account_name**
+  Storage account name used to upload logical backups to when using Azure. Default: ""
+
+* **logical_backup_azure_storage_container**
+  Storage container used to upload logical backups to when using Azure. Default: ""
+
+* **logical_backup_azure_storage_account_key**
+  Storage account key used to authenticate with Azure when uploading logical backups. Default: ""

 * **logical_backup_s3_access_key_id**
   When set, the value will be put into the AWS_ACCESS_KEY_ID env variable. The default is empty.
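A hedged sketch of an Azure-targeting backup configuration using the new options; account, container and key values are placeholders.

```yaml
# hypothetical excerpt combining the four options introduced above
configuration:
  logical_backup:
    logical_backup_provider: az
    logical_backup_azure_storage_account_name: mybackupaccount
    logical_backup_azure_storage_container: logical-backups
    logical_backup_azure_storage_account_key: "<storage-account-key>"
```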
docs/user.md
@ -30,7 +30,7 @@ spec:

   databases:
     foo: zalando
   postgresql:
-    version: "14"
+    version: "15"
 ```

 Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
@ -45,11 +45,12 @@ Make sure, the `spec` section of the manifest contains at least a `teamId`, the

 The minimum volume size to run the `postgresql` resource on Elastic Block
 Storage (EBS) is `1Gi`.

-Note, that the name of the cluster must start with the `teamId` and `-`. At
-Zalando we use team IDs (nicknames) to lower the chance of duplicate cluster
-names and colliding entities. The team ID would also be used to query an API to
-get all members of a team and create [database roles](#teams-api-roles) for
-them. Besides, the maximum cluster name length is 53 characters.
+Note, that when `enable_team_id_clustername_prefix` is set to `true` the name
+of the cluster must start with the `teamId` and `-`. At Zalando we use team IDs
+(nicknames) to lower chances of duplicate cluster names and colliding entities.
+The team ID would also be used to query an API to get all members of a team
+and create [database roles](#teams-api-roles) for them. Besides, the maximum
+cluster name length is 53 characters.
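To make the naming rule concrete, an illustrative manifest (not from the diff): with the prefix check enabled, a cluster of team `acid` must be named accordingly.

```yaml
# hypothetical minimal manifest honoring the teamId prefix rule
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-minimal-cluster  # starts with teamId "acid" plus "-"
spec:
  teamId: "acid"
  volume:
    size: 1Gi
```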

 ## Watch pods being created
@ -108,7 +109,7 @@ metadata:

 spec:
   [...]
   postgresql:
-    version: "14"
+    version: "15"
     parameters:
       password_encryption: scram-sha-256
 ```
@ -151,7 +152,7 @@ specified explicitly.

 The operator automatically generates a password for each manifest role and
 places it in the secret named
-`{username}.{team}-{clustername}.credentials.postgresql.acid.zalan.do` in the
+`{username}.{clustername}.credentials.postgresql.acid.zalan.do` in the
 same namespace as the cluster. This way, the application running in the
 K8s cluster and connecting to Postgres can obtain the password right from the
 secret, without ever sharing it outside of the cluster.
@ -181,7 +182,7 @@ be in the form of `namespace.username`.

 For such usernames, the secret is created in the given namespace and its name is
 of the following form,
-`{namespace}.{username}.{team}-{clustername}.credentials.postgresql.acid.zalan.do`
+`{namespace}.{username}.{clustername}.credentials.postgresql.acid.zalan.do`
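A worked example of the new, team-less naming, with all names assumed: for manifest role `zalando` in cluster `acid-minimal-cluster` the operator would generate a secret along these lines.

```yaml
# hypothetical Secret following the new template
apiVersion: v1
kind: Secret
metadata:
  name: zalando.acid-minimal-cluster.credentials.postgresql.acid.zalan.do
type: Opaque
data:
  username: emFsYW5kbw==  # base64("zalando")
  password: "..."         # base64-encoded generated password
```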

 ### Infrastructure roles
@ -222,7 +223,7 @@ the user name, password etc. The secret itself is referenced by the

 above list them separately.

 ```yaml
-apiVersion: v1
+apiVersion: "acid.zalan.do/v1"
 kind: OperatorConfiguration
 metadata:
   name: postgresql-operator-configuration
@ -516,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles:

 The `<dbname>_owner` role is the database owner and should be used when creating
 new database objects. All members of the `admin` role, e.g. teams API roles, can
-become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/12/sql-alterdefaultprivileges.html)
+become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/15/sql-alterdefaultprivileges.html)
 are configured for the owner role so that the `<dbname>_reader` role
 automatically gets read-access (SELECT) to new tables and sequences and the
 `<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
@ -591,7 +592,7 @@ spec:

 ### Schema `search_path` for default roles

-The schema [`search_path`](https://www.postgresql.org/docs/13/ddl-schemas.html#DDL-SCHEMAS-PATH)
+The schema [`search_path`](https://www.postgresql.org/docs/15/ddl-schemas.html#DDL-SCHEMAS-PATH)
 for each role will include the role name and the schemas this role should have
 access to. So `foo_bar_writer` does not have to schema-qualify tables from
 schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and
@ -811,7 +812,7 @@ spec:

 ### Clone directly

 Another way to get a fresh copy of your source DB cluster is via
-[pg_basebackup](https://www.postgresql.org/docs/13/app-pgbasebackup.html). To
+[pg_basebackup](https://www.postgresql.org/docs/15/app-pgbasebackup.html). To
 use this feature, simply leave out the timestamp field from the clone section.
 The operator will connect to the service of the source cluster by name. If the
 cluster is called test, then the connection string will look like host=test
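A minimal sketch of such a clone section, with the source cluster name assumed; leaving out the timestamp makes the operator stream from the live service via pg_basebackup.

```yaml
# hypothetical manifest excerpt: clone without a timestamp
spec:
  clone:
    cluster: "test"  # service name of the source cluster
```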
@ -1005,6 +1006,42 @@ option must be set to `true`.

 If you want to add a sidecar to every cluster managed by the operator, you can specify it in the [operator configuration](administrator.md#sidecars-for-postgres-clusters) instead.

+### Accessing the PostgreSQL socket from sidecars
+
+If enabled by the `share_pgsocket_with_sidecars` option in the operator
+configuration the PostgreSQL socket is placed in a volume of type `emptyDir`
+named `postgresql-run`. To allow access to the socket from any sidecar
+container simply add a VolumeMount to this volume to your sidecar spec.
+
+```yaml
+  - name: "container-name"
+    image: "company/image:tag"
+    volumeMounts:
+    - mountPath: /var/run
+      name: postgresql-run
+```
+
+If you do not want to globally enable this feature and only use it for single
+Postgres clusters, specify an `EmptyDir` volume under `additionalVolumes` in
+the manifest:
+
+```yaml
+spec:
+  additionalVolumes:
+  - name: postgresql-run
+    mountPath: /var/run/postgresql
+    targetContainers:
+    - all
+    volumeSource:
+      emptyDir: {}
+  sidecars:
+  - name: "container-name"
+    image: "company/image:tag"
+    volumeMounts:
+    - mountPath: /var/run
+      name: postgresql-run
+```

 ## InitContainers Support

 Each cluster can specify arbitrary init containers to run. These containers can
@ -1029,9 +1066,9 @@ specified but globally disabled in the configuration. The

 ## Increase volume size

-Postgres operator supports statefulset volume resize if you're using the
-operator on top of AWS. For that you need to change the size field of the
-volume description in the cluster manifest and apply the change:
+Postgres operator supports statefulset volume resize without doing a rolling
+update. For that you need to change the size field of the volume description
+in the cluster manifest and apply the change:

 ```yaml
 spec:
@ -1040,22 +1077,29 @@ spec:

 ```

 The operator compares the new value of the size field with the previous one and
-acts on differences.
+acts on differences. The `storage_resize_mode` can be configured. By default,
+the operator will adjust the PVCs and leave it to K8s and the infrastructure to
+apply the change.

-You can only enlarge the volume with the process described above, shrinking is
-not supported and will emit a warning. After this update all the new volumes in
-the statefulset are allocated according to the new size. To enlarge persistent
-volumes attached to the running pods, the operator performs the following
-actions:
+When using AWS with gp3 volumes you should set the mode to `mixed` because it
+will also adjust the IOPS and throughput that can be defined in the manifest.
+Check the [AWS docs](https://aws.amazon.com/ebs/general-purpose/) to learn
+about default and maximum values. Keep in mind that AWS rate-limits updating
+volume specs to no more than once every 6 hours.

-* call AWS API to change the volume size
+```yaml
+spec:
+  volume:
+    size: 5Gi # new volume size
+    iops: 4000
+    throughput: 500
+```

-* connect to pod using `kubectl exec` and resize filesystem with `resize2fs`.
-
-Fist step has a limitation, AWS rate-limits this operation to no more than once
-every 6 hours. Note, that if the statefulset is scaled down before resizing the
-new size is only applied to the volumes attached to the running pods. The
-size of volumes that correspond to the previously running pods is not changed.
+The operator can only enlarge volumes. Shrinking is not supported and will emit
+a warning. However, it can be done manually after updating the manifest. You
+have to delete the PVC, which will hang until you also delete the corresponding
+pod. Proceed with the next pod when the cluster is healthy again and replicas
+are streaming.
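Tying this back to the operator setting mentioned above, illustrative values: on AWS with gp3 volumes the mode would be switched like so.

```yaml
# hypothetical operator ConfigMap excerpt; "mixed" also syncs iops/throughput on gp3
data:
  storage_resize_mode: mixed  # other options: ebs, pvc, off
```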

 ## Logical backups
@ -16,7 +16,7 @@ RUN apt-get update \

        curl \
        vim \
     && pip3 install --no-cache-dir -r requirements.txt \
-    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.22.0/bin/linux/amd64/kubectl \
+    && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
     && chmod +x ./kubectl \
     && mv ./kubectl /usr/local/bin/kubectl \
     && apt-get clean \

@ -32,7 +32,7 @@ clean:

 copy: clean
 	mkdir manifests
-	cp ../manifests -r .
+	cp -r ../manifests .

 docker: scm-source.json
 	docker build -t "$(IMAGE):$(TAG)" .

@ -47,7 +47,7 @@ tools:

 	# install pinned version of 'kind'
 	# go install must run outside of a dir with a (module-based) Go project !
 	# otherwise go install updates project's dependencies and/or behaves differently
-	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.11.1
+	cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.14.0

 e2etest: tools copy clean
 	./run.sh main
@ -3,7 +3,7 @@

 export cluster_name="postgres-operator-e2e-tests"
 export kubeconfig_path="/tmp/kind-config-${cluster_name}"
 export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest"
-export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3"
+export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"

 docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \
   --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \

@ -1,3 +1,3 @@

-kubernetes==11.0.0
-timeout_decorator==0.4.1
-pyyaml==5.4.1
+kubernetes==24.2.0
+timeout_decorator==0.5.0
+pyyaml==6.0

@ -8,8 +8,8 @@ IFS=$'\n\t'

 readonly cluster_name="postgres-operator-e2e-tests"
 readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
-readonly spilo_image="registry.opensource.zalan.do/acid/spilo-14-e2e:0.1"
-readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.3"
+readonly spilo_image="registry.opensource.zalan.do/acid/spilo-15-e2e:0.1"
+readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"

 export GOPATH=${GOPATH-~/go}
 export PATH=${GOPATH}/bin:$PATH
@ -23,9 +23,9 @@ class K8sApi:

         self.core_v1 = client.CoreV1Api()
         self.apps_v1 = client.AppsV1Api()
-        self.batch_v1_beta1 = client.BatchV1beta1Api()
+        self.batch_v1 = client.BatchV1Api()
         self.custom_objects_api = client.CustomObjectsApi()
-        self.policy_v1_beta1 = client.PolicyV1beta1Api()
+        self.policy_v1 = client.PolicyV1Api()
         self.storage_v1_api = client.StorageV1Api()

@ -179,7 +179,7 @@ class K8s:

         return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items)

     def count_pdbs_with_label(self, labels, namespace='default'):
-        return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget(
+        return len(self.api.policy_v1.list_namespaced_pod_disruption_budget(
             namespace, label_selector=labels).items)

     def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):

@ -217,7 +217,7 @@ class K8s:

             time.sleep(self.RETRY_TIMEOUT_SEC)

     def get_logical_backup_job(self, namespace='default'):
-        return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
+        return self.api.batch_v1.list_namespaced_cron_job(namespace, label_selector="application=spilo")

     def wait_for_logical_backup_job(self, expected_num_of_jobs):
         while (len(self.get_logical_backup_job().items) != expected_num_of_jobs):

@ -471,7 +471,7 @@ class K8sBase:

         return len(self.api.apps_v1.list_namespaced_deployment(namespace, label_selector=labels).items)

     def count_pdbs_with_label(self, labels, namespace='default'):
-        return len(self.api.policy_v1_beta1.list_namespaced_pod_disruption_budget(
+        return len(self.api.policy_v1.list_namespaced_pod_disruption_budget(
             namespace, label_selector=labels).items)

     def count_running_pods(self, labels='application=spilo,cluster-name=acid-minimal-cluster', namespace='default'):

@ -499,7 +499,7 @@ class K8sBase:

             time.sleep(self.RETRY_TIMEOUT_SEC)

     def get_logical_backup_job(self, namespace='default'):
-        return self.api.batch_v1_beta1.list_namespaced_cron_job(namespace, label_selector="application=spilo")
+        return self.api.batch_v1.list_namespaced_cron_job(namespace, label_selector="application=spilo")

     def wait_for_logical_backup_job(self, expected_num_of_jobs):
         while (len(self.get_logical_backup_job().items) != expected_num_of_jobs):
@ -12,8 +12,8 @@ from kubernetes import client

 from tests.k8s_api import K8s
 from kubernetes.client.rest import ApiException

-SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-14-e2e:0.3"
-SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-14-e2e:0.4"
+SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-15-e2e:0.1"
+SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-15-e2e:0.2"


 def to_selector(labels):

@ -250,6 +250,8 @@ class EndToEndTestCase(unittest.TestCase):

         }
         k8s.update_config(enable_postgres_team_crd)

+        # add team and member to custom-team-membership
+        # contains already elephant user
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
             'postgresteams', 'custom-team-membership',

@ -300,6 +302,13 @@ class EndToEndTestCase(unittest.TestCase):

         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 2,
             "Database role of replaced member in PostgresTeam not renamed", 10, 5)

+        # create fake deletion user so operator fails renaming
+        # but altering role to NOLOGIN will succeed
+        create_fake_deletion_user = """
+            CREATE USER tester_delete_me NOLOGIN;
+        """
+        self.query_database(leader.metadata.name, "postgres", create_fake_deletion_user)
+
         # re-add additional member and check if the role is renamed back
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',

@ -317,11 +326,44 @@ class EndToEndTestCase(unittest.TestCase):

         user_query = """
             SELECT rolname
             FROM pg_catalog.pg_roles
-            WHERE (rolname = 'kind' AND rolcanlogin)
-            OR (rolname = 'tester_delete_me' AND NOT rolcanlogin);
+            WHERE rolname = 'kind' AND rolcanlogin;
         """
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 1,
             "Database role of recreated member in PostgresTeam not renamed back to original name", 10, 5)

+        user_query = """
+            SELECT rolname
+            FROM pg_catalog.pg_roles
+            WHERE rolname IN ('tester','tester_delete_me') AND NOT rolcanlogin;
+        """
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 2,
-            "Database role of recreated member in PostgresTeam not renamed back to original name", 10, 5)
+            "Database role of replaced member in PostgresTeam not denied from login", 10, 5)
+
+        # re-add other additional member, operator should grant LOGIN back to tester
+        # but nothing happens to deleted role
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            'acid.zalan.do', 'v1', 'default',
+            'postgresteams', 'custom-team-membership',
+            {
+                'spec': {
+                    'additionalMembers': {
+                        'e2e': [
+                            'kind',
+                            'tester'
+                        ]
+                    },
+                }
+            })
+
+        user_query = """
+            SELECT rolname
+            FROM pg_catalog.pg_roles
+            WHERE (rolname IN ('tester', 'kind')
+            AND rolcanlogin)
+            OR (rolname = 'tester_delete_me' AND NOT rolcanlogin);
+        """
+        self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 3,
+            "Database role of deleted member in PostgresTeam not removed when recreated manually", 10, 5)

         # revert config change
         revert_resync = {
@ -353,19 +395,21 @@ class EndToEndTestCase(unittest.TestCase):

             "spec": {
                 "postgresql": {
                     "parameters": {
-                        "max_connections": new_max_connections_value
+                        "max_connections": new_max_connections_value,
+                        "wal_level": "logical"
                     }
                 },
                 "patroni": {
                     "slots": {
-                        "test_slot": {
+                        "first_slot": {
                             "type": "physical"
                         }
                     },
                     "ttl": 29,
                     "loop_wait": 9,
                     "retry_timeout": 9,
-                    "synchronous_mode": True
+                    "synchronous_mode": True,
+                    "failsafe_mode": True,
                 }
             }
         }

@ -392,6 +436,10 @@ class EndToEndTestCase(unittest.TestCase):

                 "retry_timeout not updated")
             self.assertEqual(desired_config["synchronous_mode"], effective_config["synchronous_mode"],
                 "synchronous_mode not updated")
+            self.assertEqual(desired_config["failsafe_mode"], effective_config["failsafe_mode"],
+                "failsafe_mode not updated")
+            self.assertEqual(desired_config["slots"], effective_config["slots"],
+                "slots not updated")
             return True

         # check if Patroni config has been updated
@ -452,6 +500,84 @@ class EndToEndTestCase(unittest.TestCase):

         self.eventuallyEqual(lambda: self.query_database(replica.metadata.name, "postgres", setting_query)[0], lower_max_connections_value,
             "Previous max_connections setting not applied on replica", 10, 5)

+        # patch new slot via Patroni REST
+        patroni_slot = "test_patroni_slot"
+        patch_slot_command = """curl -s -XPATCH -d '{"slots": {"test_patroni_slot": {"type": "physical"}}}' localhost:8008/config"""
+        pg_patch_config["spec"]["patroni"]["slots"][patroni_slot] = {"type": "physical"}
+
+        k8s.exec_with_kubectl(leader.metadata.name, patch_slot_command)
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyTrue(compare_config, "Postgres config not applied")
+
+        # test adding new slots
+        pg_add_new_slots_patch = {
+            "spec": {
+                "patroni": {
+                    "slots": {
+                        "test_slot": {
+                            "type": "logical",
+                            "database": "foo",
+                            "plugin": "pgoutput"
+                        },
+                        "test_slot_2": {
+                            "type": "physical"
+                        }
+                    }
+                }
+            }
+        }
+
+        for slot_name, slot_details in pg_add_new_slots_patch["spec"]["patroni"]["slots"].items():
+            pg_patch_config["spec"]["patroni"]["slots"][slot_name] = slot_details
+
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_add_new_slots_patch)
+
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyTrue(compare_config, "Postgres config not applied")
+
+        # delete test_slot_2 from config and change the database type for test_slot
+        slot_to_change = "test_slot"
+        slot_to_remove = "test_slot_2"
+        pg_delete_slot_patch = {
+            "spec": {
+                "patroni": {
+                    "slots": {
+                        "test_slot": {
+                            "type": "logical",
+                            "database": "bar",
+                            "plugin": "pgoutput"
+                        },
+                        "test_slot_2": None
+                    }
+                }
+            }
+        }
+
+        pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
+        del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
+
+        k8s.api.custom_objects_api.patch_namespaced_custom_object(
+            "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)
+
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyTrue(compare_config, "Postgres config not applied")
+
+        get_slot_query = """
+            SELECT %s
+            FROM pg_replication_slots
+            WHERE slot_name = '%s';
+        """
+        self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query%("slot_name", slot_to_remove))), 0,
+            "The replication slot cannot be deleted", 10, 5)
+
+        self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query%("database", slot_to_change))[0], "bar",
+            "The replication slot cannot be updated", 10, 5)
+
+        # make sure slot from Patroni didn't get deleted
+        self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query%("slot_name", patroni_slot))), 1,
+            "The replication slot from Patroni gets deleted", 10, 5)

         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
             raise
@ -544,6 +670,20 @@ class EndToEndTestCase(unittest.TestCase):

             'LoadBalancer',
             "Expected LoadBalancer service type for replica pooler pod, found {}")

+        master_annotations = {
+            "external-dns.alpha.kubernetes.io/hostname": "acid-minimal-cluster-pooler.default.db.example.com",
+            "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+        }
+        self.eventuallyTrue(lambda: k8s.check_service_annotations(
+            master_pooler_label+","+pooler_label, master_annotations), "Wrong annotations")
+
+        replica_annotations = {
+            "external-dns.alpha.kubernetes.io/hostname": "acid-minimal-cluster-pooler-repl.default.db.example.com",
+            "service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
+        }
+        self.eventuallyTrue(lambda: k8s.check_service_annotations(
+            replica_pooler_label+","+pooler_label, replica_annotations), "Wrong annotations")
+
         # Turn off only master connection pooler
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default',
@ -806,7 +946,8 @@ class EndToEndTestCase(unittest.TestCase):

                 "AdminRole": "",
                 "Origin": 2,
                 "IsDbOwner": False,
-                "Deleted": False
+                "Deleted": False,
+                "Rotated": False
             })
             return True
         except:
@ -1012,9 +1153,10 @@ class EndToEndTestCase(unittest.TestCase):

         self.evantuallyEqual(check_version_14, "14", "Version was not upgrade to 14")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
-    def test_min_resource_limits(self):
+    def test_resource_generation(self):
         '''
-        Lower resource limits below configured minimum and let operator fix it
+        Lower resource limits below configured minimum and let operator fix it.
+        It will try to raise requests to limits which is capped with max_memory_request.
         '''
         k8s = self.k8s
         cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'

@ -1023,17 +1165,20 @@ class EndToEndTestCase(unittest.TestCase):

         _, replica_nodes = k8s.get_pg_nodes(cluster_label)
         self.assertNotEqual(replica_nodes, [])

-        # configure minimum boundaries for CPU and memory limits
+        # configure maximum memory request and minimum boundaries for CPU and memory limits
+        maxMemoryRequest = '300Mi'
         minCPULimit = '503m'
         minMemoryLimit = '502Mi'

-        patch_min_resource_limits = {
+        patch_pod_resources = {
             "data": {
+                "max_memory_request": maxMemoryRequest,
                 "min_cpu_limit": minCPULimit,
-                "min_memory_limit": minMemoryLimit
+                "min_memory_limit": minMemoryLimit,
+                "set_memory_request_to_limit": "true"
             }
         }
-        k8s.update_config(patch_min_resource_limits, "Minimum resource test")
+        k8s.update_config(patch_pod_resources, "Pod resource test")

         # lower resource limits below minimum
         pg_patch_resources = {
@ -1059,18 +1204,20 @@ class EndToEndTestCase(unittest.TestCase):

         k8s.wait_for_pod_failover(replica_nodes, 'spilo-role=master,' + cluster_label)
         k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

-        def verify_pod_limits():
+        def verify_pod_resources():
             pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector="cluster-name=acid-minimal-cluster,application=spilo").items
             if len(pods) < 2:
                 return False

-            r = pods[0].spec.containers[0].resources.limits['memory'] == minMemoryLimit
+            r = pods[0].spec.containers[0].resources.requests['memory'] == maxMemoryRequest
+            r = r and pods[0].spec.containers[0].resources.limits['memory'] == minMemoryLimit
             r = r and pods[0].spec.containers[0].resources.limits['cpu'] == minCPULimit
+            r = r and pods[1].spec.containers[0].resources.requests['memory'] == maxMemoryRequest
             r = r and pods[1].spec.containers[0].resources.limits['memory'] == minMemoryLimit
             r = r and pods[1].spec.containers[0].resources.limits['cpu'] == minCPULimit
             return r

-        self.eventuallyTrue(verify_pod_limits, "Pod limits where not adjusted")
+        self.eventuallyTrue(verify_pod_resources, "Pod resources where not adjusted")

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_multi_namespace_support(self):
@ -1198,8 +1345,9 @@ class EndToEndTestCase(unittest.TestCase):

             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")

             # node affinity change should cause another rolling update and relocation of replica
-            k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
+            k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
+            k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)

         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))

@ -1209,6 +1357,7 @@ class EndToEndTestCase(unittest.TestCase):

         self.assert_distributed_pods(master_nodes)

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    @unittest.skip("Skipping this test until fixed")
     def test_node_readiness_label(self):
         '''
         Remove node readiness label from master node. This must cause a failover.
@ -1338,9 +1487,9 @@ class EndToEndTestCase(unittest.TestCase):

         # create fake rotation users that should be removed by operator
         # but have one that would still fit into the retention period
         create_fake_rotation_user = """
-            CREATE ROLE foo_user201031 IN ROLE foo_user;
-            CREATE ROLE foo_user211031 IN ROLE foo_user;
-            CREATE ROLE foo_user"""+(today-timedelta(days=40)).strftime("%y%m%d")+""" IN ROLE foo_user;
+            CREATE USER foo_user201031 IN ROLE foo_user;
+            CREATE USER foo_user211031 IN ROLE foo_user;
+            CREATE USER foo_user"""+(today-timedelta(days=40)).strftime("%y%m%d")+""" IN ROLE foo_user;
         """
         self.query_database(leader.metadata.name, "postgres", create_fake_rotation_user)

@ -1357,6 +1506,12 @@ class EndToEndTestCase(unittest.TestCase):

             namespace="default",
             body=secret_fake_rotation)

+        # update rolconfig for foo_user that will be copied for new rotation user
+        alter_foo_user_search_path = """
+            ALTER ROLE foo_user SET search_path TO data;
+        """
+        self.query_database(leader.metadata.name, "postgres", alter_foo_user_search_path)
+
         # enable password rotation for all other users (foo_user)
         # this will force a sync of secrets for further assertions
         enable_password_rotation = {

@ -1392,6 +1547,22 @@ class EndToEndTestCase(unittest.TestCase):

         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 3,
             "Found incorrect number of rotation users", 10, 5)

+        # check if rolconfig was passed from foo_user to foo_user+today
+        # and that no foo_user has been deprecated (can still login)
+        user_query = """
+            SELECT rolname
+            FROM pg_catalog.pg_roles
+            WHERE rolname LIKE 'foo_user%'
+            AND rolconfig = ARRAY['search_path=data']::text[]
+            AND rolcanlogin;
+        """
+        self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 2,
+            "Rolconfig not applied to new rotation user", 10, 5)
+
+        # test that rotation_user can connect to the database
+        self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1,
+            "Could not connect to the database with rotation user {}".format(rotation_user), 10, 5)
+
         # disable password rotation for all other users (foo_user)
         # and pick smaller intervals to see if the third fake rotation user is dropped
         enable_password_rotation = {
@ -1421,7 +1592,7 @@ class EndToEndTestCase(unittest.TestCase):

             WHERE rolname LIKE 'foo_user%';
         """
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", user_query)), 2,
-            "Found incorrect number of rotation users", 10, 5)
+            "Found incorrect number of rotation users after disabling password rotation", 10, 5)

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_rolling_update_flag(self):

@ -1479,6 +1650,7 @@ class EndToEndTestCase(unittest.TestCase):

             raise

     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    @unittest.skip("Skipping this test until fixed")
     def test_rolling_update_label_timeout(self):
         '''
         Simulate case when replica does not receive label in time and rolling update does not finish
@ -1947,5 +2119,28 @@ class EndToEndTestCase(unittest.TestCase):

         return result_set

+    def query_database_with_user(self, pod_name, db_name, query, user_name):
+        '''
+        Query database and return result as a list
+        '''
+        k8s = self.k8s
+        result_set = []
+        exec_query = r"PGPASSWORD={} psql -h localhost -U {} -tAq -c \"{}\" -d {}"
+
+        try:
+            user_secret = k8s.get_secret(user_name)
+            secret_user = str(base64.b64decode(user_secret.data["username"]), 'utf-8')
+            secret_pw = str(base64.b64decode(user_secret.data["password"]), 'utf-8')
+            q = exec_query.format(secret_pw, secret_user, query, db_name)
+            q = "su postgres -c \"{}\"".format(q)
+            result = k8s.exec_with_kubectl(pod_name, q)
+            result_set = clean_list(result.stdout.split(b'\n'))
+        except Exception as ex:
+            print('Error on query execution: {}'.format(ex))
+            print('Stdout: {}'.format(result.stdout))
+            print('Stderr: {}'.format(result.stderr))
+
+        return result_set
+
 if __name__ == '__main__':
     unittest.main()
go.mod
@ -1,6 +1,6 @@

 module github.com/zalando/postgres-operator

-go 1.17
+go 1.18

 require (
 	github.com/aws/aws-sdk-go v1.42.18

@ -11,13 +11,14 @@ require (

 	github.com/r3labs/diff v1.1.0
 	github.com/sirupsen/logrus v1.8.1
 	github.com/stretchr/testify v1.7.0
-	golang.org/x/crypto v0.0.0-20211202192323-5770296d904e
+	golang.org/x/crypto v0.1.0
+	golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a
 	gopkg.in/yaml.v2 v2.4.0
-	k8s.io/api v0.22.4
-	k8s.io/apiextensions-apiserver v0.22.4
-	k8s.io/apimachinery v0.22.4
-	k8s.io/client-go v0.22.4
-	k8s.io/code-generator v0.22.4
+	k8s.io/api v0.23.5
+	k8s.io/apiextensions-apiserver v0.23.5
+	k8s.io/apimachinery v0.23.5
+	k8s.io/client-go v0.23.5
+	k8s.io/code-generator v0.23.5
 )

 require (

@ -25,45 +26,45 @@ require (

 	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/emicklei/go-restful v2.9.5+incompatible // indirect
-	github.com/evanphx/json-patch v4.11.0+incompatible // indirect
-	github.com/go-logr/logr v0.4.0 // indirect
+	github.com/evanphx/json-patch v4.12.0+incompatible // indirect
+	github.com/go-logr/logr v1.2.0 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
 	github.com/go-openapi/jsonreference v0.19.5 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
 	github.com/gogo/protobuf v1.3.2 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
-	github.com/google/go-cmp v0.5.5 // indirect
+	github.com/google/go-cmp v0.5.8 // indirect
 	github.com/google/gofuzz v1.1.0 // indirect
 	github.com/googleapis/gnostic v0.5.5 // indirect
 	github.com/imdario/mergo v0.3.5 // indirect
 	github.com/jmespath/go-jmespath v0.4.0 // indirect
 	github.com/josharian/intern v1.0.0 // indirect
-	github.com/json-iterator/go v1.1.11 // indirect
+	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kr/text v0.2.0 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/moby/spdystream v0.2.0 // indirect
 	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
-	github.com/modern-go/reflect2 v1.0.1 // indirect
+	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
 	github.com/spf13/pflag v1.0.5 // indirect
-	golang.org/x/mod v0.5.1 // indirect
-	golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 // indirect
-	golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 // indirect
-	golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 // indirect
-	golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
-	golang.org/x/text v0.3.6 // indirect
+	golang.org/x/mod v0.6.0 // indirect
+	golang.org/x/net v0.1.0 // indirect
+	golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
+	golang.org/x/sys v0.1.0 // indirect
+	golang.org/x/term v0.1.0 // indirect
+	golang.org/x/text v0.4.0 // indirect
 	golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
-	golang.org/x/tools v0.1.7 // indirect
-	golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+	golang.org/x/tools v0.2.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/protobuf v1.26.0 // indirect
+	google.golang.org/protobuf v1.27.1 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
-	k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 // indirect
-	k8s.io/klog/v2 v2.9.0 // indirect
-	k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c // indirect
-	k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
-	sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
+	k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c // indirect
+	k8s.io/klog/v2 v2.30.0 // indirect
+	k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
+	k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
+	sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
+	sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
 	sigs.k8s.io/yaml v1.2.0 // indirect
 )
go.sum
@ -13,6 +13,11 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV
|
|||
cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs=
|
||||
cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc=
|
||||
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
|
||||
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
|
||||
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
|
||||
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
|
||||
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
|
||||
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
|
||||
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
|
||||
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
|
||||
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
|
||||
|
|
@ -56,6 +61,7 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
|
|||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
|
||||
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
|
||||
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
|
||||
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
|
||||
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
|
||||
|
|
@ -63,11 +69,13 @@ github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:l
|
|||
github.com/aws/aws-sdk-go v1.42.18 h1:2f/cDNwQ3e+yHxtPn1si0to3GalbNHwkRm461IjwRiM=
|
||||
github.com/aws/aws-sdk-go v1.42.18/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
|
||||
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
|
||||
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs=
|
||||
github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84=
|
||||
github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM=
|
||||
github.com/blang/semver v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA=
|
||||
|
|
@ -79,7 +87,9 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P
|
|||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
|
||||
github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo=
|
||||
github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
|
||||
github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
|
||||
|
|
@ -108,11 +118,13 @@ github.com/emicklei/go-restful v2.9.5+incompatible/go.mod h1:otzb+WCGbkyDHkqmQmT
|
|||
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
|
||||
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
|
||||
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/evanphx/json-patch v4.11.0+incompatible h1:glyUF9yIYtMHzn8xaKw5rMhdWcwsYV8dZHIq5567/xs=
|
||||
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
|
||||
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
|
||||
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
|
||||
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
|
||||
github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k=
|
||||
|
|
@ -120,6 +132,7 @@ github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoD
|
|||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
|
||||
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
|
|
@ -133,8 +146,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
|
|||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
|
||||
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
|
||||
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
|
||||
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
|
||||
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
|
||||
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
|
|
@ -152,6 +166,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
|
|||
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
|
||||
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
|
||||
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
|
@ -165,6 +180,7 @@ github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
|
|||
github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
|
||||
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
|
||||
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
|
||||
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
|
||||
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
|
|
@ -188,6 +204,8 @@ github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiu
|
|||
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
|
||||
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
|
||||
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
|
||||
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
|
||||
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
|
||||
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
|
||||
|
|
@ -195,14 +213,18 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
|
||||
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g=
|
||||
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
|
||||
|
|
@@ -210,8 +232,11 @@ github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
@@ -220,6 +245,7 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -249,6 +275,7 @@ github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2p
github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
@@ -263,8 +290,9 @@ github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFF
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@@ -276,6 +304,7 @@ github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs=
@@ -287,6 +316,7 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.10.4 h1:SO9z7FRPzA03QhHKJrH5BXA6HU1rS4V2nIVrrNC1iYk=
github.com/lib/pq v1.10.4/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc=
github.com/mailru/easyjson v0.7.6 h1:8yTIVnZgCoiM1TgqoeTl+LfU5Jg6/xL3QhGQnimLYnA=
@@ -304,6 +334,7 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4
github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY=
github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/moby/term v0.0.0-20210610120745-9d4ed1856297/go.mod h1:vgPCkQMyxTZ7IDy8SXRufE172gr8+K/JE/7hHFxHW3A=
@@ -311,8 +342,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI=
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
@@ -337,11 +369,13 @@ github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1y
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic=
github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI=
@@ -360,6 +394,7 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
@@ -388,14 +423,18 @@ github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ=
github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk=
github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I=
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo=
github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk=
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg=
github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns=
github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -430,6 +469,8 @@ go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.20.0/go.mod h1:2AboqHi0CiIZU0qwhtUfCYD1GeUzvvIXWNkhDt7ZMG4=
@@ -449,17 +490,19 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0 h1:MDRAIl0xIo9Io2xV565hzXHw3zVseKrJKodhohM5CjU=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -470,6 +513,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0
golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4=
golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM=
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a h1:tlXy25amD5A7gOfbXdqCGN5k8ESEed/Ee1E5RcrYnqU=
golang.org/x/exp v0.0.0-20230108222341-4b8118a2686a/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -482,6 +527,7 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl
golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs=
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -491,9 +537,11 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.5.1 h1:OJxoQ/rynoF0dcCdI7cLPktw/hR2cueqYfjm43oqK38=
golang.org/x/mod v0.5.1/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro=
golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -529,20 +577,36 @@ golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.1.0 h1:hZ/3BUoy5aId7sCpA/Tc5lt8DkFgdVS2onTpJsZ/fl0=
golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602 h1:0Ja1LBD+yisY6RWM/BH7TJVXWsSjs2VwBSmvSX4HdBc=
golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f h1:Qmd2pbz05z7z6lm0DrgQVVPuBm92jqujBKMHMOlOQEw=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -595,10 +659,18 @@ golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200831180312-196b9ba8737a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -607,20 +679,24 @@ golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0 h1:g6Z6vPFA9dYBAF7DWcH6sCcOntplXsDKcliusYijMlw=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -675,11 +751,19 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE=
golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.7 h1:6j8CgantCy3yc8JGBqkDLMKWqZ0RDU2g1HVgacojGWQ=
golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -701,6 +785,12 @@ google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M
google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE=
google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM=
google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc=
google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg=
google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE=
google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8=
google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU=
google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94=
google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -740,8 +830,20 @@ google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A=
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -754,10 +856,16 @@ google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKa
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
@@ -769,8 +877,9 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1 h1:SnqbnDw1V7RiZcXPx5MEeqPv2s79L9i7BJUlG/+RurQ=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -782,6 +891,7 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -810,35 +920,37 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw=
k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
k8s.io/apiextensions-apiserver v0.22.4 h1:2iGpcVyw4MnAyyXVJU2Xg6ZsbIxAOfRHo0LF5A5J0RA=
k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw=
k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck=
k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
k8s.io/apiserver v0.22.4/go.mod h1:38WmcUZiiy41A7Aty8/VorWRa8vDGqoUzDf2XYlku0E=
k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg=
k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
k8s.io/code-generator v0.22.4 h1:h7lBa5IuEUC4OQ45q/gIip/a0iQcML2iwrRmXksau30=
k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw=
k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A=
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027 h1:Uusb3oh8XcdzDF/ndlI4ToKTYVlkCSJP39SRY2mfRAw=
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
k8s.io/code-generator v0.23.5 h1:xn3a6J5pUL49AoH6SPrOFtnB5cvdMl76f/bEY176R3c=
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c h1:GohjlNKauSai7gN4wsJkeZ3WAJx4Sh+oT/b5IYn5suA=
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c h1:jvamsI1tn9V0S8jicyX82qaFC0H/NKxv2e5mbqsgR80=
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
hack/update-codegen.sh
@@ -18,7 +18,8 @@ trap "cleanup" EXIT SIGINT
bash "${CODEGEN_PKG}/generate-groups.sh" all \
"${OPERATOR_PACKAGE_ROOT}/pkg/generated" "${OPERATOR_PACKAGE_ROOT}/pkg/apis" \
"acid.zalan.do:v1 zalando.org:v1" \
--go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt
--go-header-file "${SCRIPT_ROOT}"/hack/custom-boilerplate.go.txt \
-o ./

cp -r "${OPERATOR_PACKAGE_ROOT}"/pkg/* "${TARGET_CODE_DIR}"
kubectl-pg/go.mod
@@ -1,21 +1,21 @@
module github.com/zalando/postgres-operator/kubectl-pg

go 1.17
go 1.18

require (
github.com/spf13/cobra v1.2.1
github.com/spf13/viper v1.9.0
github.com/zalando/postgres-operator v1.8.1
k8s.io/api v0.22.4
k8s.io/apiextensions-apiserver v0.22.4
k8s.io/apimachinery v0.22.4
k8s.io/client-go v0.22.4
github.com/zalando/postgres-operator v1.8.2
k8s.io/api v0.23.5
k8s.io/apiextensions-apiserver v0.23.5
k8s.io/apimachinery v0.23.5
k8s.io/client-go v0.23.5
)

require (
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/fsnotify/fsnotify v1.5.1 // indirect
github.com/go-logr/logr v0.4.0 // indirect
github.com/go-logr/logr v1.2.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/google/go-cmp v0.5.6 // indirect
@@ -24,13 +24,13 @@ require (
github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.5 // indirect
github.com/inconshreveable/mousetrap v1.0.0 // indirect
github.com/json-iterator/go v1.1.11 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect
github.com/magiconair/properties v1.8.5 // indirect
github.com/mitchellh/mapstructure v1.4.2 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.1 // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect
github.com/pelletier/go-toml v1.9.4 // indirect
github.com/sirupsen/logrus v1.8.1 // indirect
@@ -39,12 +39,12 @@ require (
github.com/spf13/jwalterweatherman v1.1.0 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.2.0 // indirect
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 // indirect
golang.org/x/net v0.0.0-20210614182718-04defd469f4e // indirect
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e // indirect
golang.org/x/net v0.0.0-20211209124913-491a49abca63 // indirect
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f // indirect
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf // indirect
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d // indirect
golang.org/x/text v0.3.6 // indirect
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 // indirect
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b // indirect
golang.org/x/text v0.3.7 // indirect
golang.org/x/time v0.0.0-20210723032227-1f47c861a9ac // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/protobuf v1.27.1 // indirect
@@ -52,8 +52,10 @@ require (
gopkg.in/ini.v1 v1.63.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b // indirect
k8s.io/klog/v2 v2.9.0 // indirect
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 // indirect
k8s.io/klog/v2 v2.30.0 // indirect
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 // indirect
k8s.io/utils v0.0.0-20211116205334-6203023598ed // indirect
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 // indirect
sigs.k8s.io/yaml v1.2.0 // indirect
)
kubectl-pg/go.sum
@@ -65,13 +65,14 @@ github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRF
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20210826220005-b48c857c3a0e/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY=
github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o=
github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY=
github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8=
github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY=
github.com/aws/aws-sdk-go v1.41.16/go.mod h1:585smgzpB/KqRA+K3y/NL/oYRqQvpNJYvLm+LY1U59Q=
github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM=
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
@@ -124,7 +125,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/evanphx/json-patch v4.11.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU=
github.com/felixge/httpsnoop v1.0.1/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
@@ -134,6 +135,7 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI=
github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU=
github.com/getkin/kin-openapi v0.76.0/go.mod h1:660oXbgy5JFMKreazJaQTw7o+X00qeSyhcnluiMv+Xg=
github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
@@ -147,8 +149,9 @@ github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v0.4.0 h1:K7/B1jt6fIBQVd4Owv2MqGQClcgf0R266+7C/QjRcLc=
github.com/go-logr/logr v0.4.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.2.0 h1:QK40JKJyMdUDz+h+xvCsru/bJhvG0UxvePV0ufL/AcE=
github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/zapr v1.2.0/go.mod h1:Qa4Bsj2Vb+FAVeAKsLD8RLQ+YRJB8YDmOAKxaBQf7Ro=
github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/jsonreference v0.19.3/go.mod h1:rjx6GuL8TTa9VaixXglHmQmIL98+wF9xc8zWvFonSJ8=
@@ -163,6 +166,7 @@ github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXP
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@@ -199,6 +203,8 @@ github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW
github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ=
github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA=
github.com/google/cel-go v0.9.0/go.mod h1:U7ayypeSkw23szu4GaQTPJGx66c20mx8JklMSxrmI1w=
github.com/google/cel-spec v0.6.0/go.mod h1:Nwjgxy5CbjlPrtCWjeDjUyKMl8w41YBYGjsyDdqk0xA=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
@@ -234,7 +240,6 @@ github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLe
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
@@ -243,6 +248,7 @@ github.com/googleapis/gnostic v0.5.1/go.mod h1:6U4PtQXGIEt/Z3h5MAT7FNofLnw9vXk2c
github.com/googleapis/gnostic v0.5.5 h1:9fHAtK0uDfpveeqqo1hkEZJcFvYXAiCN3UutL8F9xHw=
github.com/googleapis/gnostic v0.5.5/go.mod h1:7+EbHbldMins07ALC74bsA81Ovc97DwqyJO1AENw9kA=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA=
github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs=
@@ -286,16 +292,15 @@ github.com/imdario/mergo v0.3.5 h1:JboBksRwiiAJWvIYJVo46AfV+IAIKZpfrSzVKj42R4Q=
github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo=
github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8=
github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11 h1:uVUAXhF2To8cbw/3xN3pxj6kk7TYKs98NIrTqPlMWAQ=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
@ -316,7 +321,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
|||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/lib/pq v1.10.3/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
|
||||
github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
|
||||
github.com/magiconair/properties v1.8.5 h1:b6kJs+EmPFMYGkow9GiUyCyOvIwYetYJ3fSaWak/Gls=
|
||||
github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60=
|
||||
|
|
@ -354,8 +358,9 @@ github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJ
|
|||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+pW6rOkFdld9QQ7jRydBKKM6jyPVI=
|
||||
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
|
||||
github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
|
||||
|
|
@ -365,14 +370,17 @@ github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRW
|
|||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e h1:fD57ERR4JtEqsWbfPhv4DMiApHyliiK5xCTNVSPiaAs=
|
||||
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
|
||||
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
|
||||
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
|
||||
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
|
||||
github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
|
||||
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
|
||||
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
|
||||
github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA=
|
||||
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
|
||||
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
|
||||
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc=
|
||||
|
|
@ -404,13 +412,13 @@ github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y8
|
|||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.28.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig=
|
||||
github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg=
|
||||
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
|
|
@ -444,7 +452,6 @@ github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t6
|
|||
github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
|
||||
github.com/spf13/jwalterweatherman v1.1.0 h1:ue6voC5bR5F8YxI5S67j9i582FU4Qvo2bmqnqMYADFk=
|
||||
github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo=
|
||||
github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
|
||||
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
|
||||
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
|
||||
|
|
@ -472,8 +479,9 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de
|
|||
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
|
||||
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/zalando/postgres-operator v1.7.1 h1:tDh7utqbrNoCNoQjy3seZiEoO+vT6SgP2+VlnSJ5mpg=
|
||||
github.com/zalando/postgres-operator v1.7.1/go.mod h1:hZTzOQBITJvv5nDHZpGIwyKqOghomxpZQRyWOfkPzKs=
|
||||
github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
|
||||
github.com/zalando/postgres-operator v1.8.2 h1:3FW3j2gXua1MSeE+NiSvB8cxM7k7fyoun46G1v++CCA=
|
||||
github.com/zalando/postgres-operator v1.8.2/go.mod h1:f7AXk8LO/tWFdW4myPJZCwMueGg6fI4RqTuOA0BefZE=
|
||||
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
|
||||
go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4=
|
||||
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
|
||||
|
|
@ -509,6 +517,7 @@ go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/
|
|||
go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo=
|
||||
go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
|
|
@ -519,10 +528,9 @@ golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3
|
|||
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
|
||||
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
|
||||
|
|
@ -603,9 +611,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210520170846-37e1c6afe023/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e h1:XpT3nA5TvE525Ne3hInMh6+GETgn27Zfm9dxsThnX2Q=
|
||||
golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63 h1:iocB37TsdFuN6IBRZ+ry36wrkoV51/tl5vOWqkcPGvY=
|
||||
golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -704,12 +714,14 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
|
||||
golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/sys v0.0.0-20210831042530-f4d43177bf5e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc=
|
||||
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d h1:SZxvLBoTP5yHO3Frd4z4vrF+DBX9vMVanchswa69toE=
|
||||
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b h1:9zKuko04nR4gjZ4+DNjHqRlAJqbJETHwiNKDqTfOjfE=
|
||||
golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -717,8 +729,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
|||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
|
@ -786,6 +799,7 @@ golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
|||
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
|
||||
golang.org/x/tools v0.1.6-0.20210820212750-d4cc65f0b2ff/go.mod h1:YD9qOF0M9xpSpdWTBbzEl5e/RnCefISl8E5Noe10jFM=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
|
|
@ -860,6 +874,7 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
|
|||
google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20200904004341-0bd0a958aa1d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201102152239-715cce707fb0/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
|
||||
|
|
@ -881,6 +896,7 @@ google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKr
|
|||
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
|
||||
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
|
||||
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
|
||||
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
|
||||
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
|
||||
|
|
@ -938,6 +954,7 @@ gopkg.in/ini.v1 v1.63.2/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
|
|||
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
|
||||
gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo=
|
||||
gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.0.0-20170812160011-eb3733d160e7/go.mod h1:JAlM8MvJe8wmxCU4Bli9HhUf9+ttbYbLASfIpnQbh74=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
|
|
@ -962,40 +979,35 @@ honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWh
|
|||
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
|
||||
honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k=
|
||||
k8s.io/api v0.22.3/go.mod h1:azgiXFiXqiWyLCfI62/eYBOu19rj2LKmIhFPP4+33fs=
|
||||
k8s.io/api v0.22.4 h1:UvyHW0ezB2oIgHAxlYoo6UJQObYXU7awuNarwoHEOjw=
|
||||
k8s.io/api v0.22.4/go.mod h1:Rgs+9gIGYC5laXQSZZ9JqT5NevNgoGiOdVWi1BAB3qk=
|
||||
k8s.io/apiextensions-apiserver v0.22.3/go.mod h1:f4plF+CXeqI89jAXL0Ml4LI/kSAZ54JS94+XOX1sae8=
|
||||
k8s.io/apiextensions-apiserver v0.22.4 h1:2iGpcVyw4MnAyyXVJU2Xg6ZsbIxAOfRHo0LF5A5J0RA=
|
||||
k8s.io/apiextensions-apiserver v0.22.4/go.mod h1:kH9lxD8dbJ+k0ZizGET55lFgdGjO8t45fgZnCVdZEpw=
|
||||
k8s.io/apimachinery v0.22.3/go.mod h1:O3oNtNadZdeOMxHFVxOreoznohCpy0z6mocxbZr7oJ0=
|
||||
k8s.io/apimachinery v0.22.4 h1:9uwcvPpukBw/Ri0EUmWz+49cnFtaoiyEhQTK+xOe7Ck=
|
||||
k8s.io/apimachinery v0.22.4/go.mod h1:yU6oA6Gnax9RrxGzVvPFFJ+mpnW6PBSqp0sx0I0HHW0=
|
||||
k8s.io/apiserver v0.22.3/go.mod h1:oam7lH/F1Kto/WTamyQYrD68fS0mGUBORAFf6x/9Mxs=
|
||||
k8s.io/apiserver v0.22.4/go.mod h1:38WmcUZiiy41A7Aty8/VorWRa8vDGqoUzDf2XYlku0E=
|
||||
k8s.io/client-go v0.22.3/go.mod h1:ElDjYf8gvZsKDYexmsmnMQ0DYO8W9RwBjfQ1PI53yow=
|
||||
k8s.io/client-go v0.22.4 h1:aAQ1Wk+I3bjCNk35YWUqbaueqrIonkfDPJSPDDe8Kfg=
|
||||
k8s.io/client-go v0.22.4/go.mod h1:Yzw4e5e7h1LNHA4uqnMVrpEpUs1hJOiuBsJKIlRCHDA=
|
||||
k8s.io/code-generator v0.22.3/go.mod h1:eV77Y09IopzeXOJzndrDyCI88UBok2h6WxAlBwpxa+o=
|
||||
k8s.io/code-generator v0.22.4/go.mod h1:qjYl54pQ/emhkT0UxbufbREYJMWsHNNV/jSVwhYZQGw=
|
||||
k8s.io/component-base v0.22.3/go.mod h1:kuybv1miLCMoOk3ebrqF93GbQHQx6W2287FC0YEQY6s=
|
||||
k8s.io/component-base v0.22.4/go.mod h1:MrSaQy4a3tFVViff8TZL6JHYSewNCLshZCwHYM58v5A=
|
||||
k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0=
|
||||
k8s.io/gengo v0.0.0-20201214224949-b6c5ce23f027/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/api v0.23.5 h1:zno3LUiMubxD/V1Zw3ijyKO3wxrhbUF1Ck+VjBvfaoA=
|
||||
k8s.io/api v0.23.5/go.mod h1:Na4XuKng8PXJ2JsploYYrivXrINeTaycCGcYgF91Xm8=
|
||||
k8s.io/apiextensions-apiserver v0.23.5 h1:5SKzdXyvIJKu+zbfPc3kCbWpbxi+O+zdmAJBm26UJqI=
|
||||
k8s.io/apiextensions-apiserver v0.23.5/go.mod h1:ntcPWNXS8ZPKN+zTXuzYMeg731CP0heCTl6gYBxLcuQ=
|
||||
k8s.io/apimachinery v0.23.5 h1:Va7dwhp8wgkUPWsEXk6XglXWU4IKYLKNlv8VkX7SDM0=
|
||||
k8s.io/apimachinery v0.23.5/go.mod h1:BEuFMMBaIbcOqVIJqNZJXGFTP4W6AycEpb5+m/97hrM=
|
||||
k8s.io/apiserver v0.23.5/go.mod h1:7wvMtGJ42VRxzgVI7jkbKvMbuCbVbgsWFT7RyXiRNTw=
|
||||
k8s.io/client-go v0.23.5 h1:zUXHmEuqx0RY4+CsnkOn5l0GU+skkRXKGJrhmE2SLd8=
|
||||
k8s.io/client-go v0.23.5/go.mod h1:flkeinTO1CirYgzMPRWxUCnV0G4Fbu2vLhYCObnt/r4=
|
||||
k8s.io/code-generator v0.23.5/go.mod h1:S0Q1JVA+kSzTI1oUvbKAxZY/DYbA/ZUb4Uknog12ETk=
|
||||
k8s.io/component-base v0.23.5/go.mod h1:c5Nq44KZyt1aLl0IpHX82fhsn84Sb0jjzwjpcA42bY0=
|
||||
k8s.io/gengo v0.0.0-20210813121822-485abfe95c7c/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
|
||||
k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE=
|
||||
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
|
||||
k8s.io/klog/v2 v2.9.0 h1:D7HV+n1V57XeZ0m6tdRkfknthUaM06VFbWldOFh8kzM=
|
||||
k8s.io/klog/v2 v2.9.0/go.mod h1:hy9LJ/NvuK+iVyP4Ehqva4HxZG/oXyIS3n3Jmire4Ec=
|
||||
k8s.io/kube-openapi v0.0.0-20210421082810-95288971da7e/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
||||
k8s.io/kube-openapi v0.0.0-20211109043538-20434351676c/go.mod h1:vHXdDvt9+2spS2Rx9ql3I8tycm3H9FDfdUoIuKCefvw=
|
||||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a h1:8dYfu/Fc9Gz2rNJKB9IQRGgQOh2clmRzNIPPY1xLY5g=
|
||||
k8s.io/utils v0.0.0-20210819203725-bdf08cb9a70a/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/klog/v2 v2.30.0 h1:bUO6drIvCIsvZ/XFgfxoGFQU/a4Qkh0iAlvUR7vlHJw=
|
||||
k8s.io/klog/v2 v2.30.0/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65 h1:E3J9oCLlaobFUqsjG9DfKbP2BmgwBL2p7pn0A3dG9W4=
|
||||
k8s.io/kube-openapi v0.0.0-20211115234752-e816edb12b65/go.mod h1:sX9MT8g7NVZM5lVL/j8QyCCJe8YSMW30QvGZWaCIDIk=
|
||||
k8s.io/utils v0.0.0-20210802155522-efc7438f0176/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed h1:ck1fRPWPJWsMd8ZRFsWc6mh/zHp5fZ/shhbrgPUxDAE=
|
||||
k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
|
||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.22/go.mod h1:LEScyzhFmoF5pso/YSeBstl57mOzx9xlU9n85RGrDQg=
|
||||
sigs.k8s.io/apiserver-network-proxy/konnectivity-client v0.0.30/go.mod h1:fEO7lRTdivWO2qYVCVG7dEADOMo/MLDCVr8So2g88Uw=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6 h1:fD1pz4yfdADVNfFmcP2aBEtudwUQ1AlLnRBALr33v3s=
|
||||
sigs.k8s.io/json v0.0.0-20211020170558-c049b76a60c6/go.mod h1:p4QtZmO4uMYipTQNzagwnNoseA6OxSUutVw05NhYDRs=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.0.2/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2 h1:Hr/htKFmJEbtMgS/UD0N+gtgctAqz81t3nu+sPzynno=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.1.2/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1 h1:bKCqE9GvQ5tiVHn5rfn1r+yao3aLQEaLzkkmAkf+A6Y=
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.2.1/go.mod h1:j/nl6xW8vLS49O8YvXW1ocPhZawJtm+Yrr7PPRQ0Vg4=
|
||||
sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q=
|
||||
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
|
||||
|
|
|
|||
|
|
@@ -10,7 +10,7 @@ metadata:
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date" key is configured
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
spec:
dockerImage: registry.opensource.zalan.do/acid/spilo-14:2.1-p6
dockerImage: ghcr.io/zalando/spilo-15:2.1-p9
teamId: "acid"
numberOfInstances: 2
users: # Application/Robot users
@@ -28,6 +28,8 @@ spec:
enableReplicaLoadBalancer: false
enableConnectionPooler: false # enable/disable connection pooler deployment
enableReplicaConnectionPooler: false # set to enable connectionPooler for replica service
enableMasterPoolerLoadBalancer: false
enableReplicaPoolerLoadBalancer: false
allowedSourceRanges: # load balancers' source ranges for both master and replica services
- 127.0.0.1/32
databases:
@@ -44,7 +46,7 @@ spec:
defaultRoles: true
defaultUsers: false
postgresql:
version: "14"
version: "15"
parameters: # Expert section
shared_buffers: "32MB"
max_connections: "10"
@@ -109,6 +111,7 @@ spec:
cpu: 500m
memory: 500Mi
patroni:
failsafe_mode: false
initdb:
encoding: "UTF8"
locale: "en_US.UTF-8"

@@ -17,7 +17,7 @@ data:
# connection_pooler_default_cpu_request: "500m"
# connection_pooler_default_memory_limit: 100Mi
# connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-22"
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
# connection_pooler_max_db_connections: 60
# connection_pooler_mode: "transaction"
# connection_pooler_number_of_instances: 2
@@ -34,7 +34,7 @@ data:
# default_memory_request: 100Mi
# delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername
docker_image: registry.opensource.zalan.do/acid/spilo-14:2.1-p6
docker_image: ghcr.io/zalando/spilo-15:2.1-p9
# downscaler_annotations: "deployment-time,downscaler/*"
# enable_admin_role_for_users: "true"
# enable_crd_registration: "true"
@@ -47,16 +47,19 @@ data:
enable_master_load_balancer: "false"
enable_master_pooler_load_balancer: "false"
enable_password_rotation: "false"
# enable_patroni_failsafe_mode: "false"
enable_pgversion_env_var: "true"
# enable_pod_antiaffinity: "false"
# enable_pod_disruption_budget: "true"
# enable_postgres_team_crd: "false"
# enable_postgres_team_crd_superusers: "false"
enable_readiness_probe: "false"
enable_replica_load_balancer: "false"
enable_replica_pooler_load_balancer: "false"
# enable_shm_volume: "true"
# enable_sidecars: "true"
enable_spilo_wal_path_compat: "true"
enable_team_id_clustername_prefix: "false"
enable_team_member_deprecation: "false"
# enable_team_superuser: "false"
enable_teams_api: "false"
@@ -66,14 +69,22 @@ data:
# ignored_annotations: ""
# infrastructure_roles_secret_name: "postgresql-infrastructure-roles"
# infrastructure_roles_secrets: "secretname:monitoring-roles,userkey:user,passwordkey:password,rolekey:inrole"
# ignore_instance_limits_annotation_key: ""
# inherited_annotations: owned-by
# inherited_labels: application,environment
# kube_iam_role: ""
# kubernetes_use_configmaps: "false"
# log_s3_bucket: ""
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.8.2"
# logical_backup_azure_storage_account_name: ""
# logical_backup_azure_storage_container: ""
# logical_backup_azure_storage_account_key: ""
# logical_backup_cpu_limit: ""
# logical_backup_cpu_request: ""
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.9.0"
# logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-"
# logical_backup_memory_limit: ""
# logical_backup_memory_request: ""
logical_backup_provider: "s3"
# logical_backup_s3_access_key_id: ""
logical_backup_s3_bucket: "my-bucket-url"
@@ -85,13 +96,16 @@ data:
logical_backup_schedule: "30 00 * * *"
major_version_upgrade_mode: "manual"
# major_version_upgrade_team_allow_list: ""
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
# master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}"
# master_pod_move_timeout: 20m
# max_instances: "-1"
# min_instances: "-1"
# max_cpu_request: "1"
# max_memory_request: 4Gi
# min_cpu_limit: 250m
# min_memory_limit: 250Mi
# minimal_major_version: "9.6"
# minimal_major_version: "11"
# node_readiness_label: "status:ready"
# node_readiness_label_merge: "OR"
# oauth_token_secret_name: postgresql-operator
@@ -103,6 +117,7 @@ data:
# password_rotation_interval: "90"
# password_rotation_user_retention: "180"
pdb_name_format: "postgres-{cluster}-pdb"
# pod_antiaffinity_preferred_during_scheduling: "false"
# pod_antiaffinity_topology_key: "kubernetes.io/hostname"
pod_deletion_wait_timeout: 10m
# pod_environment_configmap: "default/my-custom-config"
@@ -120,7 +135,8 @@ data:
ready_wait_interval: 3s
ready_wait_timeout: 30s
repair_period: 5m
replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}"
# replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
replication_username: standby
resource_check_interval: 3s
resource_check_timeout: 10m
@@ -128,6 +144,7 @@ data:
ring_log_lines: "100"
role_deletion_suffix: "_deleted"
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars: "false"
# sidecar_docker_images: ""
# set_memory_request_to_limit: "false"
spilo_allow_privilege_escalation: "true"
@@ -137,7 +154,7 @@ data:
spilo_privileged: "false"
storage_resize_mode: "pvc"
super_username: postgres
# target_major_version: "14"
# target_major_version: "15"
# team_admin_role: "admin"
# team_api_role_configuration: "log_statement:all"
# teams_api_url: http://fake-teams-api.default.svc.cluster.local

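Note on the DNS format change above: master_dns_name_format and replica_dns_name_format now default to the {namespace} placeholder instead of {team}, with the new *_legacy_* options keeping the old scheme. A minimal Go sketch of how such brace-placeholder templates expand; formatTemplate is an illustrative stand-in, not the operator's actual config.StringTemplate helper:

package main

import (
	"fmt"
	"strings"
)

// formatTemplate expands "{key}" placeholders from alternating
// key/value arguments. Illustrative stand-in only.
func formatTemplate(template string, pairs ...string) string {
	args := make([]string, 0, len(pairs))
	for i := 0; i+1 < len(pairs); i += 2 {
		args = append(args, "{"+pairs[i]+"}", pairs[i+1])
	}
	return strings.NewReplacer(args...).Replace(template)
}

func main() {
	format := "{cluster}.{namespace}.{hostedzone}"
	fmt.Println(formatTemplate(format,
		"cluster", "acid-minimal-cluster",
		"namespace", "default",
		"hostedzone", "db.example.com"))
	// prints: acid-minimal-cluster.default.db.example.com
}
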
@@ -1,7 +1,6 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
namespace: kube-system
name: standard
annotations:
storageclass.kubernetes.io/is-default-class: "true"

@@ -4,6 +4,9 @@ metadata:
name: fake-teams-api
spec:
replicas: 1
selector:
matchLabels:
name: fake-teams-api
template:
metadata:
labels:
@@ -37,8 +40,6 @@ metadata:
name: postgresql-operator
namespace: default
type: Opaque
data:
apiVersion: v1
data:
read-only-token-secret: dGVzdHRva2Vu
read-only-token-type: QmVhcmVy

@@ -8,5 +8,4 @@ data:
kind: Secret
metadata:
name: postgresql-infrastructure-roles-new
namespace: default
type: Opaque

@@ -21,5 +21,4 @@ data:
kind: Secret
metadata:
name: postgresql-infrastructure-roles
namespace: default
type: Opaque

@@ -23,7 +23,7 @@ spec:
serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/pgbouncer:master-22
image: registry.opensource.zalan.do/acid/pgbouncer:master-26
imagePullPolicy: IfNotPresent
resources:
requests:

@@ -2,7 +2,6 @@ apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-upgrade-test
namespace: default
spec:
teamId: "acid"
volume:

@@ -2,7 +2,6 @@ apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-minimal-cluster
namespace: default
spec:
teamId: "acid"
volume:
@@ -18,4 +17,4 @@ spec:
preparedDatabases:
bar: {}
postgresql:
version: "14"
version: "15"

@@ -66,7 +66,7 @@ spec:
type: string
docker_image:
type: string
default: "registry.opensource.zalan.do/acid/spilo-14:2.1-p6"
default: "ghcr.io/zalando/spilo-15:2.1-p9"
enable_crd_registration:
type: boolean
default: true
@@ -86,9 +86,14 @@ spec:
enable_spilo_wal_path_compat:
type: boolean
default: false
enable_team_id_clustername_prefix:
type: boolean
default: false
etcd_host:
type: string
default: ""
ignore_instance_limits_annotation_key:
type: string
kubernetes_use_configmaps:
type: boolean
default: false
@@ -160,10 +165,10 @@ spec:
type: string
minimal_major_version:
type: string
default: "9.6"
default: "11"
target_major_version:
type: string
default: "14"
default: "15"
kubernetes:
type: object
properties:
@@ -207,6 +212,9 @@ spec:
enable_pod_disruption_budget:
type: boolean
default: true
enable_readiness_probe:
type: boolean
default: false
enable_sidecars:
type: boolean
default: true
@@ -268,6 +276,9 @@ spec:
pdb_name_format:
type: string
default: "postgres-{cluster}-pdb"
pod_antiaffinity_preferred_during_scheduling:
type: boolean
default: false
pod_antiaffinity_topology_key:
type: string
default: "kubernetes.io/hostname"
@@ -301,6 +312,9 @@ spec:
secret_name_template:
type: string
default: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars:
type: boolean
default: false
spilo_allow_privilege_escalation:
type: boolean
default: true
@@ -317,6 +331,7 @@ spec:
type: string
enum:
- "ebs"
- "mixed"
- "pvc"
- "off"
default: "pvc"
@@ -345,6 +360,12 @@ spec:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default: "100Mi"
max_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
max_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
min_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
@@ -409,9 +430,15 @@ spec:
- "Local"
default: "Cluster"
master_dns_name_format:
type: string
default: "{cluster}.{namespace}.{hostedzone}"
master_legacy_dns_name_format:
type: string
default: "{cluster}.{team}.{hostedzone}"
replica_dns_name_format:
type: string
default: "{cluster}-repl.{namespace}.{hostedzone}"
replica_legacy_dns_name_format:
type: string
default: "{cluster}-repl.{team}.{hostedzone}"
aws_or_gcp:
@@ -446,16 +473,38 @@ spec:
logical_backup:
type: object
properties:
logical_backup_azure_storage_account_name:
type: string
logical_backup_azure_storage_container:
type: string
logical_backup_azure_storage_account_key:
type: string
logical_backup_cpu_limit:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
logical_backup_cpu_request:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
logical_backup_docker_image:
type: string
default: "registry.opensource.zalan.do/acid/logical-backup:v1.8.2"
default: "registry.opensource.zalan.do/acid/logical-backup:v1.9.0"
logical_backup_google_application_credentials:
type: string
logical_backup_job_prefix:
type: string
default: "logical-backup-"
logical_backup_memory_limit:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
logical_backup_memory_request:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
logical_backup_provider:
type: string
enum:
- "az"
- "gcs"
- "s3"
default: "s3"
logical_backup_s3_access_key_id:
type: string
@@ -586,7 +635,7 @@ spec:
default: "pooler"
connection_pooler_image:
type: string
default: "registry.opensource.zalan.do/acid/pgbouncer:master-22"
default: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
connection_pooler_max_db_connections:
type: integer
default: 60
@@ -616,6 +665,12 @@ spec:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default: "100Mi"
patroni:
type: object
properties:
failsafe_mode:
type: boolean
default: false
status:
type: object
additionalProperties:

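The resource fields added to the configuration CRD above are guarded by regular expressions: ^(\d+m|\d+(\.\d{1,3})?)$ for CPU quantities and ^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$ for memory quantities. A self-contained Go sketch that checks sample values against the same patterns (the actual enforcement happens in the Kubernetes API server, not in this code):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Same patterns as in the CRD validation above.
	cpu := regexp.MustCompile(`^(\d+m|\d+(\.\d{1,3})?)$`)
	mem := regexp.MustCompile(`^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$`)

	for _, v := range []string{"250m", "1", "0.5", "100x"} {
		fmt.Printf("cpu %q -> %v\n", v, cpu.MatchString(v)) // true, true, true, false
	}
	for _, v := range []string{"100Mi", "4Gi", "1e3", "Mi"} {
		fmt.Printf("mem %q -> %v\n", v, mem.MatchString(v)) // true, true, true, false
	}
}
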
@@ -2,7 +2,6 @@ apiVersion: "zalando.org/v1"
kind: PlatformCredentialsSet
metadata:
name: postgresql-operator
namespace: acid
spec:
application: postgresql-operator
tokens:

@@ -19,7 +19,7 @@ spec:
serviceAccountName: postgres-operator
containers:
- name: postgres-operator
image: registry.opensource.zalan.do/acid/postgres-operator:v1.8.2
image: registry.opensource.zalan.do/acid/postgres-operator:v1.9.0
imagePullPolicy: IfNotPresent
resources:
requests:

@@ -3,7 +3,7 @@ kind: OperatorConfiguration
metadata:
name: postgresql-operator-default-configuration
configuration:
docker_image: registry.opensource.zalan.do/acid/spilo-14:2.1-p6
docker_image: ghcr.io/zalando/spilo-15:2.1-p9
# enable_crd_registration: true
# crd_categories:
# - all
@@ -11,7 +11,9 @@ configuration:
enable_pgversion_env_var: true
# enable_shm_volume: true
enable_spilo_wal_path_compat: false
enable_team_id_clustername_prefix: false
etcd_host: ""
# ignore_instance_limits_annotation_key: ""
# kubernetes_use_configmaps: false
max_instances: -1
min_instances: -1
@@ -37,8 +39,8 @@ configuration:
major_version_upgrade_mode: "off"
# major_version_upgrade_team_allow_list:
# - acid
minimal_major_version: "9.6"
target_major_version: "14"
minimal_major_version: "11"
target_major_version: "15"
kubernetes:
# additional_pod_capabilities:
# - "SYS_NICE"
@@ -58,6 +60,7 @@ configuration:
enable_init_containers: true
enable_pod_antiaffinity: false
enable_pod_disruption_budget: true
enable_readiness_probe: false
enable_sidecars: true
# ignored_annotations:
# - k8s.v1.cni.cncf.io/network-status
@@ -81,6 +84,7 @@ configuration:
# node_readiness_label_merge: "OR"
oauth_token_secret_name: postgresql-operator
pdb_name_format: "postgres-{cluster}-pdb"
pod_antiaffinity_preferred_during_scheduling: false
pod_antiaffinity_topology_key: "kubernetes.io/hostname"
# pod_environment_configmap: "default/my-custom-config"
# pod_environment_secret: "my-custom-secret"
@@ -92,6 +96,7 @@ configuration:
# pod_service_account_role_binding_definition: ""
pod_terminate_grace_period: 5m
secret_name_template: "{username}.{cluster}.credentials.{tprkind}.{tprgroup}"
share_pgsocket_with_sidecars: false
spilo_allow_privilege_escalation: true
# spilo_runasuser: 101
# spilo_runasgroup: 103
@@ -108,6 +113,8 @@ configuration:
default_cpu_request: 100m
default_memory_limit: 500Mi
default_memory_request: 100Mi
# max_cpu_request: "1"
# max_memory_request: 4Gi
# min_cpu_limit: 250m
# min_memory_limit: 250Mi
timeouts:
@@ -129,8 +136,10 @@ configuration:
enable_replica_load_balancer: false
enable_replica_pooler_load_balancer: false
external_traffic_policy: "Cluster"
master_dns_name_format: "{cluster}.{team}.{hostedzone}"
replica_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
# master_legacy_dns_name_format: "{cluster}.{team}.{hostedzone}"
replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}"
# replica_legacy_dns_name_format: "{cluster}-repl.{team}.{hostedzone}"
aws_or_gcp:
# additional_secret_mount: "some-secret-name"
# additional_secret_mount_path: "/some/dir"
@@ -144,7 +153,14 @@ configuration:
# wal_gs_bucket: ""
# wal_s3_bucket: ""
logical_backup:
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.8.2"
# logical_backup_azure_storage_account_name: ""
# logical_backup_azure_storage_container: ""
# logical_backup_azure_storage_account_key: ""
# logical_backup_cpu_limit: ""
# logical_backup_cpu_request: ""
# logical_backup_memory_limit: ""
# logical_backup_memory_request: ""
logical_backup_docker_image: "registry.opensource.zalan.do/acid/logical-backup:v1.9.0"
# logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-"
logical_backup_provider: "s3"
@@ -187,9 +203,11 @@ configuration:
connection_pooler_default_cpu_request: "500m"
connection_pooler_default_memory_limit: 100Mi
connection_pooler_default_memory_request: 100Mi
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-22"
connection_pooler_image: "registry.opensource.zalan.do/acid/pgbouncer:master-26"
# connection_pooler_max_db_connections: 60
connection_pooler_mode: "transaction"
connection_pooler_number_of_instances: 2
# connection_pooler_schema: "pooler"
# connection_pooler_user: "pooler"
# patroni:
# failsafe_mode: "false"

@@ -221,6 +221,10 @@ spec:
items:
type: string
pattern: '^\ *((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))-((Mon|Tue|Wed|Thu|Fri|Sat|Sun):(2[0-3]|[01]?\d):([0-5]?\d)|(2[0-3]|[01]?\d):([0-5]?\d))\ *$'
masterServiceAnnotations:
type: object
additionalProperties:
type: string
nodeAffinity:
type: object
properties:
@@ -318,6 +322,8 @@ spec:
patroni:
type: object
properties:
failsafe_mode:
type: boolean
initdb:
type: object
additionalProperties:
@@ -363,13 +369,12 @@ spec:
version:
type: string
enum:
- "9.5"
- "9.6"
- "10"
- "11"
- "12"
- "13"
- "14"
- "15"
parameters:
type: object
additionalProperties:
@@ -399,6 +404,10 @@ spec:
replicaLoadBalancer:
type: boolean
description: deprecated
replicaServiceAnnotations:
type: object
additionalProperties:
type: string
resources:
type: object
properties:
@@ -618,7 +627,7 @@ spec:
operator:
type: string
enum:
- DoesNotExists
- DoesNotExist
- Exists
- In
- NotIn

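The maintenanceWindows pattern added at the top of the hunk above accepts either day-qualified windows (Sat:01:00-Sat:03:00) or plain time windows (10:00-12:30). A sketch that checks candidate windows against a simplified variant of that pattern; the optional surrounding-space matching (\ *) from the CRD pattern is deliberately omitted here:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Simplified variant of the CRD's maintenanceWindows pattern.
	day := `(Mon|Tue|Wed|Thu|Fri|Sat|Sun)`
	clock := `(2[0-3]|[01]?\d):([0-5]?\d)`
	end := `(` + day + `:` + clock + `|` + clock + `)`
	window := regexp.MustCompile(`^` + end + `-` + end + `$`)

	for _, w := range []string{"Sat:01:00-Sat:03:00", "10:00-12:30", "25:00-26:00"} {
		fmt.Printf("%q -> %v\n", w, window.MatchString(w)) // true, true, false
	}
}
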
@@ -2,14 +2,13 @@ apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
name: acid-standby-cluster
namespace: default
spec:
teamId: "acid"
volume:
size: 1Gi
numberOfInstances: 1
postgresql:
version: "14"
version: "15"
# Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
standby:
# s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"

@@ -355,6 +355,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
},
"masterServiceAnnotations": {
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"nodeAffinity": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
@@ -503,6 +511,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"patroni": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"failsafe_mode": {
Type: "boolean",
},
"initdb": {
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
@@ -577,12 +588,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"version": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"9.5"`),
},
{
Raw: []byte(`"9.6"`),
},
{
Raw: []byte(`"10"`),
},
@@ -598,6 +603,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
{
Raw: []byte(`"14"`),
},
{
Raw: []byte(`"15"`),
},
},
},
"parameters": {
@@ -654,6 +662,14 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
Type: "boolean",
Description: "deprecated",
},
"replicaServiceAnnotations": {
Type: "object",
AdditionalProperties: &apiextv1.JSONSchemaPropsOrBool{
Schema: &apiextv1.JSONSchemaProps{
Type: "string",
},
},
},
"resources": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
@@ -1112,9 +1128,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"enable_spilo_wal_path_compat": {
Type: "boolean",
},
"enable_team_id_clustername_prefix": {
Type: "boolean",
},
"etcd_host": {
Type: "string",
},
"ignore_instance_limits_annotation_key": {
Type: "string",
},
"kubernetes_use_configmaps": {
Type: "boolean",
},
@@ -1269,6 +1291,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"enable_pod_disruption_budget": {
Type: "boolean",
},
"enable_readiness_probe": {
Type: "boolean",
},
"enable_sidecars": {
Type: "boolean",
},
@@ -1363,6 +1388,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"pdb_name_format": {
Type: "string",
},
"pod_antiaffinity_preferred_during_scheduling": {
Type: "boolean",
},
"pod_antiaffinity_topology_key": {
Type: "string",
},
@@ -1404,6 +1432,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"secret_name_template": {
Type: "string",
},
"share_pgsocket_with_sidecars": {
Type: "boolean",
},
"spilo_runasuser": {
Type: "integer",
},
@@ -1425,6 +1456,9 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
{
Raw: []byte(`"ebs"`),
},
{
Raw: []byte(`"mixed"`),
},
{
Raw: []byte(`"pvc"`),
},
@@ -1446,6 +1480,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
},
},
},
"patroni": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"failsafe_mode": {
Type: "boolean",
},
},
},
"postgres_pod_resources": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
@@ -1465,6 +1507,14 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
"max_cpu_request": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"max_memory_request": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
"min_cpu_limit": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
@@ -1544,9 +1594,15 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"master_dns_name_format": {
Type: "string",
},
"master_legacy_dns_name_format": {
Type: "string",
},
"replica_dns_name_format": {
Type: "string",
},
"replica_legacy_dns_name_format": {
Type: "string",
},
},
},
"aws_or_gcp": {
@@ -1584,6 +1640,23 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"logical_backup": {
Type: "object",
Properties: map[string]apiextv1.JSONSchemaProps{
"logical_backup_azure_storage_account_name": {
Type: "string",
},
"logical_backup_azure_storage_container": {
Type: "string",
},
"logical_backup_azure_storage_account_key": {
Type: "string",
},
"logical_backup_cpu_limit": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"logical_backup_cpu_request": {
Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
},
"logical_backup_docker_image": {
Type: "string",
},
@@ -1593,8 +1666,27 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
"logical_backup_job_prefix": {
Type: "string",
},
"logical_backup_memory_limit": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
"logical_backup_memory_request": {
Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
},
"logical_backup_provider": {
Type: "string",
Enum: []apiextv1.JSON{
{
Raw: []byte(`"az"`),
},
{
Raw: []byte(`"gcs"`),
},
{
Raw: []byte(`"s3"`),
},
},
},
"logical_backup_s3_access_key_id": {
Type: "string",

@@ -110,15 +110,9 @@ func (p *Postgresql) UnmarshalJSON(data []byte) error {
}
tmp2 := Postgresql(tmp)

if clusterName, err := extractClusterName(tmp2.ObjectMeta.Name, tmp2.Spec.TeamID); err != nil {
tmp2.Error = err.Error()
tmp2.Status = PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid}
} else if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil {
if err := validateCloneClusterDescription(tmp2.Spec.Clone); err != nil {
tmp2.Error = err.Error()
tmp2.Status.PostgresClusterStatus = ClusterStatusInvalid
} else {
tmp2.Spec.ClusterName = clusterName
}

*p = tmp2

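The hunk above drops the extractClusterName step from Postgresql's custom UnmarshalJSON. The surrounding decode-into-an-alias-type idiom is the standard way to avoid infinite recursion in a custom unmarshaller; a self-contained sketch with simplified stand-in types (Spec and the validation hook here are assumptions for illustration, not the operator's full definitions):

package main

import (
	"encoding/json"
	"fmt"
)

// Simplified stand-ins for the operator's types.
type Spec struct {
	TeamID string `json:"teamId"`
}

type Postgresql struct {
	Spec  Spec   `json:"spec"`
	Error string `json:"-"`
}

// postgresqlCopy has the same layout but none of the methods,
// so json.Unmarshal on it does not re-enter UnmarshalJSON.
type postgresqlCopy Postgresql

func (p *Postgresql) UnmarshalJSON(data []byte) error {
	var tmp postgresqlCopy
	if err := json.Unmarshal(data, &tmp); err != nil {
		return err
	}
	tmp2 := Postgresql(tmp)
	// Post-decode validation would go here, mirroring the
	// validateCloneClusterDescription call in the real code.
	*p = tmp2
	return nil
}

func main() {
	var pg Postgresql
	_ = json.Unmarshal([]byte(`{"spec":{"teamId":"acid"}}`), &pg)
	fmt.Println(pg.Spec.TeamID) // acid
}
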
@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct {
|
|||
type MajorVersionUpgradeConfiguration struct {
|
||||
MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"off"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
|
||||
MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"`
|
||||
MinimalMajorVersion string `json:"minimal_major_version" default:"9.6"`
|
||||
TargetMajorVersion string `json:"target_major_version" default:"14"`
|
||||
MinimalMajorVersion string `json:"minimal_major_version" default:"11"`
|
||||
TargetMajorVersion string `json:"target_major_version" default:"15"`
|
||||
}
|
||||
|
||||
// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself
|
||||
|
|
@ -72,6 +72,7 @@ type KubernetesMetaConfiguration struct {
|
|||
StorageResizeMode string `json:"storage_resize_mode,omitempty"`
|
||||
EnableInitContainers *bool `json:"enable_init_containers,omitempty"`
|
||||
EnableSidecars *bool `json:"enable_sidecars,omitempty"`
|
||||
SharePgSocketWithSidecars *bool `json:"share_pgsocket_with_sidecars,omitempty"`
|
||||
SecretNameTemplate config.StringTemplate `json:"secret_name_template,omitempty"`
|
||||
ClusterDomain string `json:"cluster_domain,omitempty"`
|
||||
OAuthTokenSecretName spec.NamespacedName `json:"oauth_token_secret_name,omitempty"`
|
||||
|
|
@ -96,8 +97,10 @@ type KubernetesMetaConfiguration struct {
|
|||
PodPriorityClassName string `json:"pod_priority_class_name,omitempty"`
|
||||
MasterPodMoveTimeout Duration `json:"master_pod_move_timeout,omitempty"`
|
||||
EnablePodAntiAffinity bool `json:"enable_pod_antiaffinity,omitempty"`
|
||||
PodAntiAffinityPreferredDuringScheduling bool `json:"pod_antiaffinity_preferred_during_scheduling,omitempty"`
|
||||
PodAntiAffinityTopologyKey string `json:"pod_antiaffinity_topology_key,omitempty"`
|
||||
PodManagementPolicy string `json:"pod_management_policy,omitempty"`
|
||||
EnableReadinessProbe bool `json:"enable_readiness_probe,omitempty"`
|
||||
EnableCrossNamespaceSecret bool `json:"enable_cross_namespace_secret,omitempty"`
|
||||
}
|
||||
|
||||
|
|
@ -109,6 +112,8 @@ type PostgresPodResourcesDefaults struct {
|
|||
DefaultMemoryLimit string `json:"default_memory_limit,omitempty"`
|
||||
MinCPULimit string `json:"min_cpu_limit,omitempty"`
|
||||
MinMemoryLimit string `json:"min_memory_limit,omitempty"`
|
||||
MaxCPURequest string `json:"max_cpu_request,omitempty"`
|
||||
MaxMemoryRequest string `json:"max_memory_request,omitempty"`
|
||||
}
|
||||
|
||||
// OperatorTimeouts defines the timeout of ResourceCheck, PodWait, ReadyWait
|
||||
|
|
@ -132,7 +137,9 @@ type LoadBalancerConfiguration struct {
|
|||
EnableReplicaPoolerLoadBalancer bool `json:"enable_replica_pooler_load_balancer,omitempty"`
|
||||
CustomServiceAnnotations map[string]string `json:"custom_service_annotations,omitempty"`
|
||||
MasterDNSNameFormat config.StringTemplate `json:"master_dns_name_format,omitempty"`
|
||||
MasterLegacyDNSNameFormat config.StringTemplate `json:"master_legacy_dns_name_format,omitempty"`
|
||||
ReplicaDNSNameFormat config.StringTemplate `json:"replica_dns_name_format,omitempty"`
|
||||
ReplicaLegacyDNSNameFormat config.StringTemplate `json:"replica_legacy_dns_name_format,omitempty"`
|
||||
ExternalTrafficPolicy string `json:"external_traffic_policy" default:"Cluster"`
|
||||
}
@ -213,6 +220,9 @@ type OperatorLogicalBackupConfiguration struct {
Schedule string `json:"logical_backup_schedule,omitempty"`
DockerImage string `json:"logical_backup_docker_image,omitempty"`
BackupProvider string `json:"logical_backup_provider,omitempty"`
AzureStorageAccountName string `json:"logical_backup_azure_storage_account_name,omitempty"`
AzureStorageContainer string `json:"logical_backup_azure_storage_container,omitempty"`
AzureStorageAccountKey string `json:"logical_backup_azure_storage_account_key,omitempty"`
S3Bucket string `json:"logical_backup_s3_bucket,omitempty"`
S3Region string `json:"logical_backup_s3_region,omitempty"`
S3Endpoint string `json:"logical_backup_s3_endpoint,omitempty"`

@ -222,6 +232,15 @@ type OperatorLogicalBackupConfiguration struct {
RetentionTime string `json:"logical_backup_s3_retention_time,omitempty"`
GoogleApplicationCredentials string `json:"logical_backup_google_application_credentials,omitempty"`
JobPrefix string `json:"logical_backup_job_prefix,omitempty"`
CPURequest string `json:"logical_backup_cpu_request,omitempty"`
MemoryRequest string `json:"logical_backup_memory_request,omitempty"`
CPULimit string `json:"logical_backup_cpu_limit,omitempty"`
MemoryLimit string `json:"logical_backup_memory_limit,omitempty"`
}

// PatroniConfiguration defines configuration for Patroni
type PatroniConfiguration struct {
FailsafeMode *bool `json:"failsafe_mode,omitempty"`
}
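FailsafeMode is a *bool rather than a plain bool so that "unset" stays distinguishable from "false"; only an explicitly set value needs to be pushed down to Patroni. A two-line illustration:

enabled := true
cfg := PatroniConfiguration{FailsafeMode: &enabled} // nil would mean: keep Patroni's default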
// OperatorConfigurationData defines the operation config

@ -232,12 +251,11 @@ type OperatorConfigurationData struct {
EnableLazySpiloUpgrade bool `json:"enable_lazy_spilo_upgrade,omitempty"`
EnablePgVersionEnvVar bool `json:"enable_pgversion_env_var,omitempty"`
EnableSpiloWalPathCompat bool `json:"enable_spilo_wal_path_compat,omitempty"`
EnableTeamIdClusternamePrefix bool `json:"enable_team_id_clustername_prefix,omitempty"`
EtcdHost string `json:"etcd_host,omitempty"`
KubernetesUseConfigMaps bool `json:"kubernetes_use_configmaps,omitempty"`
DockerImage string `json:"docker_image,omitempty"`
Workers uint32 `json:"workers,omitempty"`
MinInstances int32 `json:"min_instances,omitempty"`
MaxInstances int32 `json:"max_instances,omitempty"`
ResyncPeriod Duration `json:"resync_period,omitempty"`
RepairPeriod Duration `json:"repair_period,omitempty"`
SetMemoryRequestToLimit bool `json:"set_memory_request_to_limit,omitempty"`

@ -257,6 +275,11 @@ type OperatorConfigurationData struct {
Scalyr ScalyrConfiguration `json:"scalyr"`
LogicalBackup OperatorLogicalBackupConfiguration `json:"logical_backup"`
ConnectionPooler ConnectionPoolerConfiguration `json:"connection_pooler"`
Patroni PatroniConfiguration `json:"patroni"`

MinInstances int32 `json:"min_instances,omitempty"`
MaxInstances int32 `json:"max_instances,omitempty"`
IgnoreInstanceLimitsAnnotationKey string `json:"ignore_instance_limits_annotation_key,omitempty"`
}

// Duration shortens this frequently used name

@ -36,6 +36,9 @@ type PostgresSpec struct {
TeamID string `json:"teamId"`
DockerImage string `json:"dockerImage,omitempty"`

// deprecated field storing cluster name without teamId prefix
ClusterName string `json:"-"`

SpiloRunAsUser *int64 `json:"spiloRunAsUser,omitempty"`
SpiloRunAsGroup *int64 `json:"spiloRunAsGroup,omitempty"`
SpiloFSGroup *int64 `json:"spiloFSGroup,omitempty"`

@ -62,7 +65,6 @@ type PostgresSpec struct {
NumberOfInstances int32 `json:"numberOfInstances"`
MaintenanceWindows []MaintenanceWindow `json:"maintenanceWindows,omitempty"`
Clone *CloneDescription `json:"clone,omitempty"`
ClusterName string `json:"-"`
Databases map[string]string `json:"databases,omitempty"`
PreparedDatabases map[string]PreparedDatabase `json:"preparedDatabases,omitempty"`
SchedulerName *string `json:"schedulerName,omitempty"`

@ -77,6 +79,10 @@ type PostgresSpec struct {
StandbyCluster *StandbyDescription `json:"standby,omitempty"`
PodAnnotations map[string]string `json:"podAnnotations,omitempty"`
ServiceAnnotations map[string]string `json:"serviceAnnotations,omitempty"`
// MasterServiceAnnotations takes precedence over ServiceAnnotations for master role if not empty
MasterServiceAnnotations map[string]string `json:"masterServiceAnnotations,omitempty"`
// ReplicaServiceAnnotations takes precedence over ServiceAnnotations for replica role if not empty
ReplicaServiceAnnotations map[string]string `json:"replicaServiceAnnotations,omitempty"`
TLS *TLSDescription `json:"tls,omitempty"`
AdditionalVolumes []AdditionalVolume `json:"additionalVolumes,omitempty"`
Streams []Stream `json:"streams,omitempty"`

@ -113,10 +119,10 @@ type PreparedSchema struct {

// MaintenanceWindow describes the time window when the operator is allowed to do maintenance on a cluster.
type MaintenanceWindow struct {
Everyday bool
Weekday time.Weekday
StartTime metav1.Time // Start time
EndTime metav1.Time // End time
Everyday bool `json:"everyday,omitempty"`
Weekday time.Weekday `json:"weekday,omitempty"`
StartTime metav1.Time `json:"startTime,omitempty"`
EndTime metav1.Time `json:"endTime,omitempty"`
}
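In the manifest itself, maintenance windows stay the compact string form handled by the type's custom (un)marshaler; the new JSON tags only matter when the parsed struct is serialized. The accepted shapes, taken from the test fixtures below:

// "Mon:01:00-06:00"  – window on a specific weekday
// "Sat:00:00-04:00"
// "05:00-05:15"      – everyday window (Everyday = true)
windows := []string{"Mon:01:00-06:00", "Sat:00:00-04:00", "05:00-05:15"}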
// Volume describes a single volume in the manifest.

@ -169,6 +175,7 @@ type Patroni struct {
SynchronousMode bool `json:"synchronous_mode,omitempty"`
SynchronousModeStrict bool `json:"synchronous_mode_strict,omitempty"`
SynchronousNodeCount uint32 `json:"synchronous_node_count,omitempty" defaults:"1"`
FailsafeMode *bool `json:"failsafe_mode,omitempty"`
}

// StandbyDescription contains remote primary config or s3/gs wal path

@ -46,7 +46,7 @@ func parseWeekday(s string) (time.Weekday, error) {
return time.Weekday(weekday), nil
}

func extractClusterName(clusterName string, teamName string) (string, error) {
func ExtractClusterName(clusterName string, teamName string) (string, error) {
teamNameLen := len(teamName)
if len(clusterName) < teamNameLen+2 {
return "", fmt.Errorf("cluster name must match {TEAM}-{NAME} format. Got cluster name '%v', team name '%v'", clusterName, teamName)
@ -213,7 +213,7 @@ var unmarshalCluster = []struct {
"127.0.0.1/32"
],
"postgresql": {
"version": "9.6",
"version": "15",
"parameters": {
"shared_buffers": "32MB",
"max_connections": "10",

@ -273,7 +273,7 @@ var unmarshalCluster = []struct {
},
Spec: PostgresSpec{
PostgresqlParam: PostgresqlParam{
PgVersion: "9.6",
PgVersion: "15",
Parameters: map[string]string{
"shared_buffers": "32MB",
"max_connections": "10",

@ -330,28 +330,10 @@ var unmarshalCluster = []struct {
Clone: &CloneDescription{
ClusterName: "acid-batman",
},
ClusterName: "testcluster1",
},
Error: "",
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"9.6","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
{
about: "example with teamId set in input",
in: []byte(`{"kind": "Postgresql","apiVersion": "acid.zalan.do/v1","metadata": {"name": "teapot-testcluster1"}, "spec": {"teamId": "acid"}}`),
out: Postgresql{
TypeMeta: metav1.TypeMeta{
Kind: "Postgresql",
APIVersion: "acid.zalan.do/v1",
},
ObjectMeta: metav1.ObjectMeta{
Name: "teapot-testcluster1",
},
Spec: PostgresSpec{TeamID: "acid"},
Status: PostgresStatus{PostgresClusterStatus: ClusterStatusInvalid},
Error: errors.New("name must match {TEAM}-{NAME} format").Error(),
},
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"teapot-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"","parameters":null},"volume":{"size":"","storageClass":""},"patroni":{"initdb":null,"pg_hba":null,"ttl":0,"loop_wait":0,"retry_timeout":0,"maximum_lag_on_failover":0,"slots":null},"teamId":"acid","allowedSourceRanges":null,"numberOfInstances":0,"users":null,"clone":null},"status":{"PostgresClusterStatus":"Invalid"}}`),
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"15","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil},
{
about: "example with clone",

@ -369,7 +351,6 @@ var unmarshalCluster = []struct {
Clone: &CloneDescription{
ClusterName: "team-batman",
},
ClusterName: "testcluster1",
},
Error: "",
},

@ -391,7 +372,6 @@ var unmarshalCluster = []struct {
StandbyCluster: &StandbyDescription{
S3WalPath: "s3://custom/path/to/bucket/",
},
ClusterName: "testcluster1",
},
Error: "",
},

@ -418,7 +398,7 @@ var postgresqlList = []struct {
out PostgresqlList
err error
}{
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"9.6"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"15"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
PostgresqlList{
TypeMeta: metav1.TypeMeta{
Kind: "List",

@ -439,7 +419,7 @@ var postgresqlList = []struct {
},
Spec: PostgresSpec{
ClusterName: "testcluster42",
PostgresqlParam: PostgresqlParam{PgVersion: "9.6"},
PostgresqlParam: PostgresqlParam{PgVersion: "15"},
Volume: Volume{Size: "10Gi"},
TeamID: "acid",
AllowedSourceRanges: []string{"185.85.220.0/22"},

@ -628,10 +608,10 @@ func TestServiceAnnotations(t *testing.T) {
func TestClusterName(t *testing.T) {
for _, tt := range clusterNames {
t.Run(tt.about, func(t *testing.T) {
name, err := extractClusterName(tt.in, tt.inTeam)
name, err := ExtractClusterName(tt.in, tt.inTeam)
if err != nil {
if tt.err == nil || err.Error() != tt.err.Error() {
t.Errorf("extractClusterName expected error: %v, got: %v", tt.err, err)
t.Errorf("ExtractClusterName expected error: %v, got: %v", tt.err, err)
}
return
} else if tt.err != nil {
@ -2,7 +2,7 @@
// +build !ignore_autogenerated

/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@ -193,6 +193,11 @@ func (in *KubernetesMetaConfiguration) DeepCopyInto(out *KubernetesMetaConfigura
*out = new(bool)
**out = **in
}
if in.SharePgSocketWithSidecars != nil {
in, out := &in.SharePgSocketWithSidecars, &out.SharePgSocketWithSidecars
*out = new(bool)
**out = **in
}
out.OAuthTokenSecretName = in.OAuthTokenSecretName
out.InfrastructureRolesSecretName = in.InfrastructureRolesSecretName
if in.InfrastructureRolesDefs != nil {

@ -423,6 +428,7 @@ func (in *OperatorConfigurationData) DeepCopyInto(out *OperatorConfigurationData
out.Scalyr = in.Scalyr
out.LogicalBackup = in.LogicalBackup
in.ConnectionPooler.DeepCopyInto(&out.ConnectionPooler)
in.Patroni.DeepCopyInto(&out.Patroni)
return
}

@ -549,6 +555,11 @@ func (in *Patroni) DeepCopyInto(out *Patroni) {
(*out)[key] = outVal
}
}
if in.FailsafeMode != nil {
in, out := &in.FailsafeMode, &out.FailsafeMode
*out = new(bool)
**out = **in
}
return
}

@ -562,6 +573,27 @@ func (in *Patroni) DeepCopy() *Patroni {
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PatroniConfiguration) DeepCopyInto(out *PatroniConfiguration) {
*out = *in
if in.FailsafeMode != nil {
in, out := &in.FailsafeMode, &out.FailsafeMode
*out = new(bool)
**out = **in
}
return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PatroniConfiguration.
func (in *PatroniConfiguration) DeepCopy() *PatroniConfiguration {
if in == nil {
return nil
}
out := new(PatroniConfiguration)
in.DeepCopyInto(out)
return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *PostgresPodResourcesDefaults) DeepCopyInto(out *PostgresPodResourcesDefaults) {
*out = *in

@ -760,6 +792,20 @@ func (in *PostgresSpec) DeepCopyInto(out *PostgresSpec) {
(*out)[key] = val
}
}
if in.MasterServiceAnnotations != nil {
in, out := &in.MasterServiceAnnotations, &out.MasterServiceAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.ReplicaServiceAnnotations != nil {
in, out := &in.ReplicaServiceAnnotations, &out.ReplicaServiceAnnotations
*out = make(map[string]string, len(*in))
for key, val := range *in {
(*out)[key] = val
}
}
if in.TLS != nil {
in, out := &in.TLS, &out.TLS
*out = new(TLSDescription)
@ -12,7 +12,6 @@ import (
"time"

"github.com/sirupsen/logrus"

"github.com/zalando/postgres-operator/pkg/cluster"
"github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util"

@ -31,9 +30,9 @@ type controllerInformer interface {
GetOperatorConfig() *config.Config
GetStatus() *spec.ControllerStatus
TeamClusterList() map[string][]spec.NamespacedName
ClusterStatus(team, namespace, cluster string) (*cluster.ClusterStatus, error)
ClusterLogs(team, namespace, cluster string) ([]*spec.LogEntry, error)
ClusterHistory(team, namespace, cluster string) ([]*spec.Diff, error)
ClusterStatus(namespace, cluster string) (*cluster.ClusterStatus, error)
ClusterLogs(namespace, cluster string) ([]*spec.LogEntry, error)
ClusterHistory(namespace, cluster string) ([]*spec.Diff, error)
ClusterDatabasesMap() map[string][]string
WorkerLogs(workerID uint32) ([]*spec.LogEntry, error)
ListQueue(workerID uint32) (*spec.QueueDump, error)

@ -55,9 +54,9 @@ const (
)

var (
clusterStatusRe = fmt.Sprintf(`^/clusters/%s/%s/%s/?$`, teamRe, namespaceRe, clusterRe)
clusterLogsRe = fmt.Sprintf(`^/clusters/%s/%s/%s/logs/?$`, teamRe, namespaceRe, clusterRe)
clusterHistoryRe = fmt.Sprintf(`^/clusters/%s/%s/%s/history/?$`, teamRe, namespaceRe, clusterRe)
clusterStatusRe = fmt.Sprintf(`^/clusters/%s/%s/?$`, namespaceRe, clusterRe)
clusterLogsRe = fmt.Sprintf(`^/clusters/%s/%s/logs/?$`, namespaceRe, clusterRe)
clusterHistoryRe = fmt.Sprintf(`^/clusters/%s/%s/history/?$`, namespaceRe, clusterRe)
teamURLRe = fmt.Sprintf(`^/clusters/%s/?$`, teamRe)

clusterStatusURL = regexp.MustCompile(clusterStatusRe)
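With the team segment dropped from the routes, per-cluster endpoints are now addressed by namespace and cluster name alone. The resulting shapes (example names assumed; compare the updated test constants further down):

// GET /clusters/default/acid-testcluster42/          – cluster status
// GET /clusters/default/acid-testcluster42/logs/     – cluster logs
// GET /clusters/default/acid-testcluster42/history/  – cluster history
// GET /clusters/acid/                                – clusters owned by team "acid"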
@ -87,6 +86,7 @@ func New(controller controllerInformer, port int, logger *logrus.Logger) *Server
mux.Handle("/debug/pprof/trace", http.HandlerFunc(pprof.Trace))

mux.Handle("/status/", http.HandlerFunc(s.controllerStatus))
mux.Handle("/readyz/", http.HandlerFunc(s.controllerReady))
mux.Handle("/config/", http.HandlerFunc(s.operatorConfig))

mux.HandleFunc("/clusters/", s.clusters)

@ -155,6 +155,10 @@ func (s *Server) controllerStatus(w http.ResponseWriter, req *http.Request) {
s.respond(s.controller.GetStatus(), nil, w)
}

func (s *Server) controllerReady(w http.ResponseWriter, req *http.Request) {
s.respond("OK", nil, w)
}
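The new /readyz/ endpoint always answers "OK" once the HTTP server is up, which makes it a natural target for a readiness probe. A hedged sketch (port value and imports are assumptions, not taken from this diff; uses k8s.io/api/core/v1 and k8s.io/apimachinery/pkg/util/intstr):

probe := &v1.Probe{
	ProbeHandler: v1.ProbeHandler{
		HTTPGet: &v1.HTTPGetAction{Path: "/readyz/", Port: intstr.FromInt(8080)},
	},
}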
func (s *Server) operatorConfig(w http.ResponseWriter, req *http.Request) {
s.respond(map[string]interface{}{
"controller": s.controller.GetConfig(),

@ -170,7 +174,7 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) {

if matches := util.FindNamedStringSubmatch(clusterStatusURL, req.URL.Path); matches != nil {
namespace := matches["namespace"]
resp, err = s.controller.ClusterStatus(matches["team"], namespace, matches["cluster"])
resp, err = s.controller.ClusterStatus(namespace, matches["cluster"])
} else if matches := util.FindNamedStringSubmatch(teamURL, req.URL.Path); matches != nil {
teamClusters := s.controller.TeamClusterList()
clusters, found := teamClusters[matches["team"]]

@ -181,21 +185,21 @@ func (s *Server) clusters(w http.ResponseWriter, req *http.Request) {

clusterNames := make([]string, 0)
for _, cluster := range clusters {
clusterNames = append(clusterNames, cluster.Name[len(matches["team"])+1:])
clusterNames = append(clusterNames, cluster.Name)
}

resp, err = clusterNames, nil
} else if matches := util.FindNamedStringSubmatch(clusterLogsURL, req.URL.Path); matches != nil {
namespace := matches["namespace"]
resp, err = s.controller.ClusterLogs(matches["team"], namespace, matches["cluster"])
resp, err = s.controller.ClusterLogs(namespace, matches["cluster"])
} else if matches := util.FindNamedStringSubmatch(clusterHistoryURL, req.URL.Path); matches != nil {
namespace := matches["namespace"]
resp, err = s.controller.ClusterHistory(matches["team"], namespace, matches["cluster"])
resp, err = s.controller.ClusterHistory(namespace, matches["cluster"])
} else if req.URL.Path == clustersURL {
clusterNamesPerTeam := make(map[string][]string)
for team, clusters := range s.controller.TeamClusterList() {
for _, cluster := range clusters {
clusterNamesPerTeam[team] = append(clusterNamesPerTeam[team], cluster.Name[len(team)+1:])
clusterNamesPerTeam[team] = append(clusterNamesPerTeam[team], cluster.Name)
}
}
resp, err = clusterNamesPerTeam, nil

@ -5,9 +5,9 @@ import (
)

const (
clusterStatusTest = "/clusters/test-id/test_namespace/testcluster/"
clusterStatusNumericTest = "/clusters/test-id-1/test_namespace/testcluster/"
clusterLogsTest = "/clusters/test-id/test_namespace/testcluster/logs/"
clusterStatusTest = "/clusters/test-namespace/testcluster/"
clusterStatusNumericTest = "/clusters/test-namespace-1/testcluster/"
clusterLogsTest = "/clusters/test-namespace/testcluster/logs/"
teamTest = "/clusters/test-id/"
)

@ -29,7 +29,7 @@ import (
"github.com/zalando/postgres-operator/pkg/util/volumes"
appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1"
policybeta1 "k8s.io/api/policy/v1beta1"
policyv1 "k8s.io/api/policy/v1"
rbacv1 "k8s.io/api/rbac/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"

@ -61,7 +61,7 @@ type kubeResources struct {
Endpoints map[PostgresRole]*v1.Endpoints
Secrets map[types.UID]*v1.Secret
Statefulset *appsv1.StatefulSet
PodDisruptionBudget *policybeta1.PodDisruptionBudget
PodDisruptionBudget *policyv1.PodDisruptionBudget
//Pods are treated separately
//PVCs are treated separately
}
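The move from policy/v1beta1 to policy/v1 is forced by upstream: the beta PodDisruptionBudget API is removed in Kubernetes 1.25. For context, a minimal policy/v1 object of the kind the operator manages (name and label values are assumptions for illustration):

pdb := policyv1.PodDisruptionBudget{
	ObjectMeta: metav1.ObjectMeta{Name: "postgres-acid-test-pdb"},
	Spec: policyv1.PodDisruptionBudgetSpec{
		// keep at least one master pod available during voluntary disruptions
		MinAvailable: &intstr.IntOrString{Type: intstr.Int, IntVal: 1},
		Selector:     &metav1.LabelSelector{MatchLabels: map[string]string{"spilo-role": "master"}},
	},
}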
@ -84,6 +84,7 @@ type Cluster struct {
userSyncStrategy spec.UserSyncer
deleteOptions metav1.DeleteOptions
podEventsQueue *cache.FIFO
replicationSlots map[string]interface{}

teamsAPIClient teams.Interface
oauthTokenGetter OAuthTokenGetter

@ -91,7 +92,6 @@ type Cluster struct {
currentProcess Process
processMu sync.RWMutex // protects the current operation for reporting, no need to hold the master mutex
specMu sync.RWMutex // protects the spec for reporting, no need to hold the master mutex
streamApplications []string
ConnectionPooler map[PostgresRole]*ConnectionPoolerObjects
EBSVolumes map[string]volumes.VolumeProperties
VolumeResizer volumes.VolumeResizer

@ -141,6 +141,7 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
podEventsQueue: podEventsQueue,
KubeClient: kubeClient,
currentMajorVersion: 0,
replicationSlots: make(map[string]interface{}),
}
cluster.logger = logger.WithField("pkg", "cluster").WithField("cluster-name", cluster.clusterName())
cluster.teamsAPIClient = teams.NewTeamsAPI(cfg.OpConfig.TeamsAPIUrl, logger)

@ -227,6 +228,10 @@ func (c *Cluster) initUsers() error {
}

if err := c.initHumanUsers(); err != nil {
// remember all cached users in c.pgUsers
for cachedUserName, cachedUser := range c.pgUsersCache {
c.pgUsers[cachedUserName] = cachedUser
}
return fmt.Errorf("could not init human users: %v", err)
}

@ -371,6 +376,10 @@ func (c *Cluster) Create() error {
}
}

for slotName, desiredSlot := range c.Spec.Patroni.Slots {
c.replicationSlots[slotName] = desiredSlot
}

return nil
}
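After Create(), c.replicationSlots mirrors the slots requested in the manifest, giving later syncs a baseline to diff against. Using the "permanent_logical_1" fixture from the tests above, the map would hold:

slots := map[string]interface{}{
	"permanent_logical_1": map[string]string{
		"database": "foo", "plugin": "pgoutput", "type": "logical",
	},
}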
@ -389,6 +398,11 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
needsReplace = true
reasons = append(reasons, "new statefulset's annotations do not match: "+reason)
}
if c.Statefulset.Spec.PodManagementPolicy != statefulSet.Spec.PodManagementPolicy {
match = false
needsReplace = true
reasons = append(reasons, "new statefulset's pod management policy do not match")
}

needsRollUpdate, reasons = c.compareContainers("initContainers", c.Statefulset.Spec.Template.Spec.InitContainers, statefulSet.Spec.Template.Spec.InitContainers, needsRollUpdate, reasons)
needsRollUpdate, reasons = c.compareContainers("containers", c.Statefulset.Spec.Template.Spec.Containers, statefulSet.Spec.Template.Spec.Containers, needsRollUpdate, reasons)

@ -528,6 +542,8 @@ func (c *Cluster) compareContainers(description string, setA, setB []v1.Containe
checks := []containerCheck{
newCheck("new statefulset %s's %s (index %d) name does not match the current one",
func(a, b v1.Container) bool { return a.Name != b.Name }),
newCheck("new statefulset %s's %s (index %d) readiness probe does not match the current one",
func(a, b v1.Container) bool { return !reflect.DeepEqual(a.ReadinessProbe, b.ReadinessProbe) }),
newCheck("new statefulset %s's %s (index %d) ports do not match the current one",
func(a, b v1.Container) bool { return !comparePorts(a.Ports, b.Ports) }),
newCheck("new statefulset %s's %s (index %d) resources do not match the current ones",

@ -741,6 +757,7 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
// for a cluster that had no such job before. In this case a missing job is not an error.
func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
updateFailed := false
userInitFailed := false
syncStatefulSet := false

c.mu.Lock()

@ -778,32 +795,39 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}
}

// check if users need to be synced
// Users
func() {
// check if users need to be synced during update
sameUsers := reflect.DeepEqual(oldSpec.Spec.Users, newSpec.Spec.Users) &&
reflect.DeepEqual(oldSpec.Spec.PreparedDatabases, newSpec.Spec.PreparedDatabases)
sameRotatedUsers := reflect.DeepEqual(oldSpec.Spec.UsersWithSecretRotation, newSpec.Spec.UsersWithSecretRotation) &&
reflect.DeepEqual(oldSpec.Spec.UsersWithInPlaceSecretRotation, newSpec.Spec.UsersWithInPlaceSecretRotation)

// connection pooler needs one system user created, which is done in
// initUsers. Check if it needs to be called.
needConnectionPooler := needMasterConnectionPoolerWorker(&newSpec.Spec) ||
needReplicaConnectionPoolerWorker(&newSpec.Spec)
// connection pooler needs one system user created who is initialized in initUsers
// only when disabled in oldSpec and enabled in newSpec
needPoolerUser := c.needConnectionPoolerUser(&oldSpec.Spec, &newSpec.Spec)

if !sameUsers || !sameRotatedUsers || needConnectionPooler {
// streams new replication user created who is initialized in initUsers
// only when streams were not specified in oldSpec but in newSpec
needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0

if !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser {
c.logger.Debugf("initialize users")
if err := c.initUsers(); err != nil {
c.logger.Errorf("could not init users: %v", err)
c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err)
userInitFailed = true
updateFailed = true
return
}

c.logger.Debugf("syncing secrets")

//TODO: mind the secrets of the deleted/new users
if err := c.syncSecrets(); err != nil {
c.logger.Errorf("could not sync secrets: %v", err)
updateFailed = true
}
}
}()
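Per the new comments, the pooler user is only (re)initialized on an off-to-on transition. A sketch of what c.needConnectionPoolerUser presumably checks (its body is not part of this diff; needConnectionPooler is the helper used in initSystemUsers below):

func (c *Cluster) needConnectionPoolerUser(oldSpec, newSpec *acidv1.PostgresSpec) bool {
	// pooling was disabled before and is enabled now
	return !needConnectionPooler(oldSpec) && needConnectionPooler(newSpec)
}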
// Volume
if c.OpConfig.StorageResizeMode != "off" {

@ -812,6 +836,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
c.logger.Infof("Storage resize is disabled (storage_resize_mode is off). Skipping volume sync.")
}

// streams configuration
if len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 {
syncStatefulSet = true
}

// Statefulset
func() {
oldSs, err := c.generateStatefulSet(&oldSpec.Spec)

@ -827,6 +856,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
updateFailed = true
return
}

if syncStatefulSet || !reflect.DeepEqual(oldSs, newSs) {
c.logger.Debugf("syncing statefulsets")
syncStatefulSet = false

@ -885,7 +915,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
}()

// Roles and Databases
if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
c.logger.Debugf("syncing roles")
if err := c.syncRoles(); err != nil {
c.logger.Errorf("could not sync roles: %v", err)

@ -913,13 +943,13 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// need to process. In the future we may want to do this more careful and
// check which databases we need to process, but even repeating the whole
// installation process should be good enough.

if _, err := c.syncConnectionPooler(oldSpec, newSpec, c.installLookupFunction); err != nil {
c.logger.Errorf("could not sync connection pooler: %v", err)
updateFailed = true
}

if len(c.Spec.Streams) > 0 {
// streams
if len(newSpec.Spec.Streams) > 0 {
if err := c.syncStreams(); err != nil {
c.logger.Errorf("could not sync streams: %v", err)
updateFailed = true

@ -1087,42 +1117,20 @@ func (c *Cluster) initSystemUsers() {
Password: util.RandomPassword(constants.PasswordLength),
}

// Connection pooler user is an exception, if requested it's going to be
// created by operator as a normal pgUser
// Connection pooler user is an exception
// if requested it's going to be created by operator
if needConnectionPooler(&c.Spec) {
connectionPoolerSpec := c.Spec.ConnectionPooler
if connectionPoolerSpec == nil {
connectionPoolerSpec = &acidv1.ConnectionPooler{}
}

// Using superuser as pooler user is not a good idea. First of all it's
// not going to be synced correctly with the current implementation,
// and second it's a bad practice.
username := c.OpConfig.ConnectionPooler.User

isSuperUser := connectionPoolerSpec.User == c.OpConfig.SuperUsername
isProtectedUser := c.shouldAvoidProtectedOrSystemRole(
connectionPoolerSpec.User, "connection pool role")

if !isSuperUser && !isProtectedUser {
username = util.Coalesce(
connectionPoolerSpec.User,
c.OpConfig.ConnectionPooler.User)
}
username := c.poolerUser(&c.Spec)

// connection pooler application should be able to login with this role
connectionPoolerUser := spec.PgUser{
Origin: spec.RoleConnectionPooler,
Origin: spec.RoleOriginConnectionPooler,
Name: username,
Namespace: c.Namespace,
Flags: []string{constants.RoleFlagLogin},
Password: util.RandomPassword(constants.PasswordLength),
}

if _, exists := c.pgUsers[username]; !exists {
c.pgUsers[username] = connectionPoolerUser
}

if _, exists := c.systemUsers[constants.ConnectionPoolerUserKeyName]; !exists {
c.systemUsers[constants.ConnectionPoolerUserKeyName] = connectionPoolerUser
}

@ -1131,17 +1139,17 @@ func (c *Cluster) initSystemUsers() {
// replication users for event streams are another exception
// the operator will create one replication user for all streams
if len(c.Spec.Streams) > 0 {
username := constants.EventStreamSourceSlotPrefix + constants.UserRoleNameSuffix
username := fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix)
streamUser := spec.PgUser{
Origin: spec.RoleConnectionPooler,
Origin: spec.RoleOriginStream,
Name: username,
Namespace: c.Namespace,
Flags: []string{constants.RoleFlagLogin, constants.RoleFlagReplication},
Password: util.RandomPassword(constants.PasswordLength),
}

if _, exists := c.pgUsers[username]; !exists {
c.pgUsers[username] = streamUser
if _, exists := c.systemUsers[constants.EventStreamUserKeyName]; !exists {
c.systemUsers[constants.EventStreamUserKeyName] = streamUser
}
}
}
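Both stream-user fixes here are about classification rather than behaviour: the user now carries its own RoleOriginStream origin instead of piggybacking on the pooler origin, and it is registered under systemUsers so the operator treats it as operator-owned. Assuming the usual constant values (prefix "fes", suffix "_user" — an assumption, the constants are not shown in this diff), the resulting role name would be:

username := fmt.Sprintf("%s%s", "fes", "_user") // "fes_user"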
@ -1159,9 +1167,9 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
constants.WriterRoleNameSuffix: constants.ReaderRoleNameSuffix,
}
defaultUsers := map[string]string{
constants.OwnerRoleNameSuffix + constants.UserRoleNameSuffix: constants.OwnerRoleNameSuffix,
constants.ReaderRoleNameSuffix + constants.UserRoleNameSuffix: constants.ReaderRoleNameSuffix,
constants.WriterRoleNameSuffix + constants.UserRoleNameSuffix: constants.WriterRoleNameSuffix,
fmt.Sprintf("%s%s", constants.OwnerRoleNameSuffix, constants.UserRoleNameSuffix): constants.OwnerRoleNameSuffix,
fmt.Sprintf("%s%s", constants.ReaderRoleNameSuffix, constants.UserRoleNameSuffix): constants.ReaderRoleNameSuffix,
fmt.Sprintf("%s%s", constants.WriterRoleNameSuffix, constants.UserRoleNameSuffix): constants.WriterRoleNameSuffix,
}

for preparedDbName, preparedDB := range c.Spec.PreparedDatabases {

@ -1222,7 +1230,7 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
c.logger.Warn("secretNamespace ignored because enable_cross_namespace_secret set to false. Creating secrets in cluster namespace.")
}
}
roleName := prefix + defaultRole
roleName := fmt.Sprintf("%s%s", prefix, defaultRole)

flags := []string{constants.RoleFlagNoLogin}
if defaultRole[len(defaultRole)-5:] == constants.UserRoleNameSuffix {

@ -1240,7 +1248,7 @@ func (c *Cluster) initDefaultRoles(defaultRoles map[string]string, admin, prefix
adminRole = admin
isOwner = true
} else {
adminRole = prefix + constants.OwnerRoleNameSuffix
adminRole = fmt.Sprintf("%s%s", prefix, constants.OwnerRoleNameSuffix)
}

newRole := spec.PgUser{

@ -1482,7 +1490,8 @@ func (c *Cluster) GetCurrentProcess() Process {
// GetStatus provides status of the cluster
func (c *Cluster) GetStatus() *ClusterStatus {
status := &ClusterStatus{
Cluster: c.Spec.ClusterName,
Cluster: c.Name,
Namespace: c.Namespace,
Team: c.Spec.TeamID,
Status: c.Status,
Spec: c.Spec,
@ -2,13 +2,13 @@ package cluster

import (
"fmt"
"net/http"
"reflect"
"strings"
"testing"

"github.com/stretchr/testify/assert"

"github.com/sirupsen/logrus"
"github.com/stretchr/testify/assert"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
"github.com/zalando/postgres-operator/pkg/spec"

@ -61,7 +61,6 @@ var cl = New(
)

func TestStatefulSetAnnotations(t *testing.T) {
testName := "CheckStatefulsetAnnotations"
spec := acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1,
Resources: &acidv1.Resources{

@ -74,19 +73,59 @@ func TestStatefulSetAnnotations(t *testing.T) {
}
ss, err := cl.generateStatefulSet(&spec)
if err != nil {
t.Errorf("in %s no statefulset created %v", testName, err)
t.Errorf("in %s no statefulset created %v", t.Name(), err)
}
if ss != nil {
annotation := ss.ObjectMeta.GetAnnotations()
if _, ok := annotation["downscaler/downtime_replicas"]; !ok {
t.Errorf("in %s respective annotation not found on sts", testName)
t.Errorf("in %s respective annotation not found on sts", t.Name())
}
}
}

func TestStatefulSetUpdateWithEnv(t *testing.T) {
oldSpec := &acidv1.PostgresSpec{
TeamID: "myapp", NumberOfInstances: 1,
Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
ResourceLimits: acidv1.ResourceDescription{CPU: "1", Memory: "10"},
},
Volume: acidv1.Volume{
Size: "1G",
},
}
oldSS, err := cl.generateStatefulSet(oldSpec)
if err != nil {
t.Errorf("in %s no StatefulSet created %v", t.Name(), err)
}

newSpec := oldSpec.DeepCopy()
newSS, err := cl.generateStatefulSet(newSpec)
if err != nil {
t.Errorf("in %s no StatefulSet created %v", t.Name(), err)
}

if !reflect.DeepEqual(oldSS, newSS) {
t.Errorf("in %s StatefulSet's must be equal", t.Name())
}

newSpec.Env = []v1.EnvVar{
{
Name: "CUSTOM_ENV_VARIABLE",
Value: "data",
},
}
newSS, err = cl.generateStatefulSet(newSpec)
if err != nil {
t.Errorf("in %s no StatefulSet created %v", t.Name(), err)
}

if reflect.DeepEqual(oldSS, newSS) {
t.Errorf("in %s StatefulSet's must be not equal", t.Name())
}
}

func TestInitRobotUsers(t *testing.T) {
testName := "TestInitRobotUsers"
tests := []struct {
manifestUsers map[string]acidv1.UserFlags
infraRoles map[string]spec.PgUser

@ -130,22 +169,20 @@ func TestInitRobotUsers(t *testing.T) {
cl.pgUsers = tt.infraRoles
if err := cl.initRobotUsers(); err != nil {
if tt.err == nil {
t.Errorf("%s got an unexpected error: %v", testName, err)
t.Errorf("%s got an unexpected error: %v", t.Name(), err)
}
if err.Error() != tt.err.Error() {
t.Errorf("%s expected error %v, got %v", testName, tt.err, err)
t.Errorf("%s expected error %v, got %v", t.Name(), tt.err, err)
}
} else {
if !reflect.DeepEqual(cl.pgUsers, tt.result) {
t.Errorf("%s expected: %#v, got %#v", testName, tt.result, cl.pgUsers)
t.Errorf("%s expected: %#v, got %#v", t.Name(), tt.result, cl.pgUsers)
}
}
}
}

func TestInitAdditionalOwnerRoles(t *testing.T) {
testName := "TestInitAdditionalOwnerRoles"

manifestUsers := map[string]acidv1.UserFlags{"foo_owner": {}, "bar_owner": {}, "app_user": {}}
expectedUsers := map[string]spec.PgUser{
"foo_owner": {Origin: spec.RoleOriginManifest, Name: "foo_owner", Namespace: cl.Namespace, Password: "f123", Flags: []string{"LOGIN"}, IsDbOwner: true, MemberOf: []string{"cron_admin", "part_man"}},

@ -158,7 +195,7 @@ func TestInitAdditionalOwnerRoles(t *testing.T) {

// this should set IsDbOwner field for manifest users
if err := cl.initRobotUsers(); err != nil {
t.Errorf("%s could not init manifest users", testName)
t.Errorf("%s could not init manifest users", t.Name())
}

// now assign additional roles to owners

@ -169,7 +206,7 @@ func TestInitAdditionalOwnerRoles(t *testing.T) {
expectedPgUser := expectedUsers[username]
if !util.IsEqualIgnoreOrder(expectedPgUser.MemberOf, existingPgUser.MemberOf) {
t.Errorf("%s unexpected membership of user %q: expected member of %#v, got member of %#v",
testName, username, expectedPgUser.MemberOf, existingPgUser.MemberOf)
t.Name(), username, expectedPgUser.MemberOf, existingPgUser.MemberOf)
}
}
}

@ -186,7 +223,14 @@ type mockTeamsAPIClient struct {
}

func (m *mockTeamsAPIClient) TeamInfo(teamID, token string) (tm *teams.Team, statusCode int, err error) {
return &teams.Team{Members: m.members}, statusCode, nil
if len(m.members) > 0 {
return &teams.Team{Members: m.members}, http.StatusOK, nil
}

// when members are not set handle this as an error for this mock API
// makes it easier to test behavior when teams API is unavailable
return nil, http.StatusInternalServerError,
fmt.Errorf("mocked %d error of mock Teams API for team %q", http.StatusInternalServerError, teamID)
}

func (m *mockTeamsAPIClient) setMembers(members []string) {
@ -195,48 +239,67 @@ func (m *mockTeamsAPIClient) setMembers(members []string) {

// Test adding a member of a product team owning a particular DB cluster
func TestInitHumanUsers(t *testing.T) {

var mockTeamsAPI mockTeamsAPIClient
cl.oauthTokenGetter = &mockOAuthTokenGetter{}
cl.teamsAPIClient = &mockTeamsAPI
testName := "TestInitHumanUsers"

// members of a product team are granted superuser rights for DBs of their team
cl.OpConfig.EnableTeamSuperuser = true

cl.OpConfig.EnableTeamsAPI = true
cl.OpConfig.EnableTeamMemberDeprecation = true
cl.OpConfig.PamRoleName = "zalandos"
cl.Spec.TeamID = "test"
cl.Spec.Users = map[string]acidv1.UserFlags{"bar": []string{}}

tests := []struct {
existingRoles map[string]spec.PgUser
teamRoles []string
result map[string]spec.PgUser
err error
}{
{
existingRoles: map[string]spec.PgUser{"foo": {Name: "foo", Origin: spec.RoleOriginTeamsAPI,
Flags: []string{"NOLOGIN"}}, "bar": {Name: "bar", Flags: []string{"NOLOGIN"}}},
Flags: []string{"LOGIN"}}, "bar": {Name: "bar", Flags: []string{"LOGIN"}}},
teamRoles: []string{"foo"},
result: map[string]spec.PgUser{"foo": {Name: "foo", Origin: spec.RoleOriginTeamsAPI,
MemberOf: []string{cl.OpConfig.PamRoleName}, Flags: []string{"LOGIN", "SUPERUSER"}},
"bar": {Name: "bar", Flags: []string{"NOLOGIN"}}},
"bar": {Name: "bar", Flags: []string{"LOGIN"}}},
err: fmt.Errorf("could not init human users: cannot initialize members for team %q who owns the Postgres cluster: could not get list of team members for team %q: could not get team info for team %q: mocked %d error of mock Teams API for team %q",
cl.Spec.TeamID, cl.Spec.TeamID, cl.Spec.TeamID, http.StatusInternalServerError, cl.Spec.TeamID),
},
{
existingRoles: map[string]spec.PgUser{},
teamRoles: []string{"admin", replicationUserName},
result: map[string]spec.PgUser{},
err: nil,
},
}

for _, tt := range tests {
// set pgUsers so that initUsers sets up pgUsersCache with team roles
cl.pgUsers = tt.existingRoles

// initUsers calls initHumanUsers which should fail
// because no members are set for mocked teams API
if err := cl.initUsers(); err != nil {
// check that at least team roles are remembered in c.pgUsers
if len(cl.pgUsers) < len(tt.teamRoles) {
t.Errorf("%s unexpected size of pgUsers: expected at least %d, got %d", t.Name(), len(tt.teamRoles), len(cl.pgUsers))
}
if err.Error() != tt.err.Error() {
t.Errorf("%s expected error %v, got %v", t.Name(), err, tt.err)
}
}

// set pgUsers again to test initHumanUsers with working teams API
cl.pgUsers = tt.existingRoles
mockTeamsAPI.setMembers(tt.teamRoles)
if err := cl.initHumanUsers(); err != nil {
t.Errorf("%s got an unexpected error %v", testName, err)
t.Errorf("%s got an unexpected error %v", t.Name(), err)
}

if !reflect.DeepEqual(cl.pgUsers, tt.result) {
t.Errorf("%s expects %#v, got %#v", testName, tt.result, cl.pgUsers)
t.Errorf("%s expects %#v, got %#v", t.Name(), tt.result, cl.pgUsers)
}
}
}
@ -254,22 +317,22 @@ type mockTeamsAPIClientMultipleTeams struct {
func (m *mockTeamsAPIClientMultipleTeams) TeamInfo(teamID, token string) (tm *teams.Team, statusCode int, err error) {
for _, team := range m.teams {
if team.teamID == teamID {
return &teams.Team{Members: team.members}, statusCode, nil
return &teams.Team{Members: team.members}, http.StatusOK, nil
}
}

// should not be reached if a slice with teams is populated correctly
return nil, statusCode, nil
// when given teamId is not found in teams return StatusNotFound
// the operator should only return a warning in this case and not error out (#1842)
return nil, http.StatusNotFound,
fmt.Errorf("mocked %d error of mock Teams API for team %q", http.StatusNotFound, teamID)
}

// Test adding members of maintenance teams that get superuser rights for all PG databases
func TestInitHumanUsersWithSuperuserTeams(t *testing.T) {

var mockTeamsAPI mockTeamsAPIClientMultipleTeams
cl.oauthTokenGetter = &mockOAuthTokenGetter{}
cl.teamsAPIClient = &mockTeamsAPI
cl.OpConfig.EnableTeamSuperuser = false
testName := "TestInitHumanUsersWithSuperuserTeams"

cl.OpConfig.EnableTeamsAPI = true
cl.OpConfig.PamRoleName = "zalandos"

@ -360,6 +423,16 @@ func TestInitHumanUsersWithSuperuserTeams(t *testing.T) {
"postgres_superuser": userA,
},
},
// case 4: the team does not exist which should not return an error
{
ownerTeam: "acid",
existingRoles: map[string]spec.PgUser{},
superuserTeams: []string{"postgres_superusers"},
teams: []mockTeam{teamA, teamB, teamTest},
result: map[string]spec.PgUser{
"postgres_superuser": userA,
},
},
}

for _, tt := range tests {

@ -371,17 +444,16 @@ func TestInitHumanUsersWithSuperuserTeams(t *testing.T) {
cl.OpConfig.PostgresSuperuserTeams = tt.superuserTeams

if err := cl.initHumanUsers(); err != nil {
t.Errorf("%s got an unexpected error %v", testName, err)
t.Errorf("%s got an unexpected error %v", t.Name(), err)
}

if !reflect.DeepEqual(cl.pgUsers, tt.result) {
t.Errorf("%s expects %#v, got %#v", testName, tt.result, cl.pgUsers)
t.Errorf("%s expects %#v, got %#v", t.Name(), tt.result, cl.pgUsers)
}
}
}

func TestPodAnnotations(t *testing.T) {
testName := "TestPodAnnotations"
tests := []struct {
subTest string
operator map[string]string

@ -428,13 +500,13 @@ func TestPodAnnotations(t *testing.T) {
for k, v := range annotations {
if observed, expected := v, tt.merged[k]; observed != expected {
t.Errorf("%v expects annotation value %v for key %v, but found %v",
testName+"/"+tt.subTest, expected, observed, k)
t.Name()+"/"+tt.subTest, expected, observed, k)
}
}
for k, v := range tt.merged {
if observed, expected := annotations[k], v; observed != expected {
t.Errorf("%v expects annotation value %v for key %v, but found %v",
testName+"/"+tt.subTest, expected, observed, k)
t.Name()+"/"+tt.subTest, expected, observed, k)
}
}
}
|
|||
enableMasterLoadBalancerOC bool
|
||||
enableReplicaLoadBalancerSpec *bool
|
||||
enableReplicaLoadBalancerOC bool
|
||||
enableTeamIdClusterPrefix bool
|
||||
operatorAnnotations map[string]string
|
||||
clusterAnnotations map[string]string
|
||||
serviceAnnotations map[string]string
|
||||
masterServiceAnnotations map[string]string
|
||||
replicaServiceAnnotations map[string]string
|
||||
expect map[string]string
|
||||
}{
|
||||
//MASTER
|
||||
|
|
@ -460,8 +535,9 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
role: "master",
|
||||
enableMasterLoadBalancerSpec: &disabled,
|
||||
enableMasterLoadBalancerOC: false,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: make(map[string]string),
|
||||
},
|
||||
{
|
||||
|
|
@ -469,10 +545,11 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
role: "master",
|
||||
enableMasterLoadBalancerSpec: &enabled,
|
||||
enableMasterLoadBalancerOC: false,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
},
|
||||
},
|
||||
|
|
@ -481,18 +558,20 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
role: "master",
|
||||
enableMasterLoadBalancerSpec: &disabled,
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: make(map[string]string),
|
||||
},
|
||||
{
|
||||
about: "Master with no annotations and EnableMasterLoadBalancer defined only on operator config",
|
||||
role: "master",
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
},
|
||||
},
|
||||
|
|
@ -500,10 +579,11 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Master with cluster annotations and load balancer enabled",
|
||||
role: "master",
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{"foo": "bar"},
|
||||
serviceAnnotations: map[string]string{"foo": "bar"},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"foo": "bar",
|
||||
},
|
||||
|
|
@ -513,18 +593,20 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
role: "master",
|
||||
enableMasterLoadBalancerSpec: &disabled,
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{"foo": "bar"},
|
||||
serviceAnnotations: map[string]string{"foo": "bar"},
|
||||
expect: map[string]string{"foo": "bar"},
|
||||
},
|
||||
{
|
||||
about: "Master with operator annotations and load balancer enabled",
|
||||
role: "master",
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: map[string]string{"foo": "bar"},
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"foo": "bar",
|
||||
},
|
||||
|
|
@ -533,12 +615,13 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Master with operator annotations override default annotations",
|
||||
role: "master",
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: map[string]string{
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
},
|
||||
|
|
@ -546,12 +629,13 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Master with cluster annotations override default annotations",
|
||||
role: "master",
|
||||
enableMasterLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{
|
||||
serviceAnnotations: map[string]string{
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
},
|
||||
|
|
@ -559,26 +643,45 @@ func TestServiceAnnotations(t *testing.T) {
about: "Master with cluster annotations do not override external-dns annotations",
role: "master",
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: map[string]string{
serviceAnnotations: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Master with operator annotations do not override external-dns annotations",
about: "Master with cluster name teamId prefix enabled",
role: "master",
enableMasterLoadBalancerOC: true,
clusterAnnotations: make(map[string]string),
operatorAnnotations: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
enableTeamIdClusterPrefix: true,
serviceAnnotations: make(map[string]string),
operatorAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},
{
about: "Master with master service annotations override service annotations",
role: "master",
enableMasterLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
serviceAnnotations: map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-nlb-target-type": "ip",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
},
masterServiceAnnotations: map[string]string{
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "2000",
},
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg.test.db.example.com,test-stg.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-nlb-target-type": "ip",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "2000",
},
},
// REPLICA

@ -587,8 +690,9 @@ func TestServiceAnnotations(t *testing.T) {
role: "replica",
enableReplicaLoadBalancerSpec: &disabled,
enableReplicaLoadBalancerOC: false,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
serviceAnnotations: make(map[string]string),
expect: make(map[string]string),
},
{

@ -596,10 +700,11 @@ func TestServiceAnnotations(t *testing.T) {
role: "replica",
enableReplicaLoadBalancerSpec: &enabled,
enableReplicaLoadBalancerOC: false,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
serviceAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
},
},

@ -608,18 +713,20 @@ func TestServiceAnnotations(t *testing.T) {
role: "replica",
enableReplicaLoadBalancerSpec: &disabled,
enableReplicaLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
serviceAnnotations: make(map[string]string),
expect: make(map[string]string),
},
{
about: "Replica with no annotations and EnableReplicaLoadBalancer defined only on operator config",
role: "replica",
enableReplicaLoadBalancerOC: true,
enableTeamIdClusterPrefix: false,
operatorAnnotations: make(map[string]string),
clusterAnnotations: make(map[string]string),
serviceAnnotations: make(map[string]string),
expect: map[string]string{
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
},
|
||||
},
|
||||
|
|
@ -627,10 +734,11 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Replica with cluster annotations and load balancer enabled",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{"foo": "bar"},
|
||||
serviceAnnotations: map[string]string{"foo": "bar"},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"foo": "bar",
|
||||
},
|
||||
|
|
@ -640,18 +748,20 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
role: "replica",
|
||||
enableReplicaLoadBalancerSpec: &disabled,
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{"foo": "bar"},
|
||||
serviceAnnotations: map[string]string{"foo": "bar"},
|
||||
expect: map[string]string{"foo": "bar"},
|
||||
},
|
||||
{
|
||||
about: "Replica with operator annotations and load balancer enabled",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: map[string]string{"foo": "bar"},
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"foo": "bar",
|
||||
},
|
||||
|
|
@ -660,12 +770,13 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Replica with operator annotations override default annotations",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: map[string]string{
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
clusterAnnotations: make(map[string]string),
|
||||
serviceAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
},
|
||||
|
|
@ -673,12 +784,13 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Replica with cluster annotations override default annotations",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{
|
||||
serviceAnnotations: map[string]string{
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
},
|
||||
|
|
@ -686,26 +798,45 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "Replica with cluster annotations do not override external-dns annotations",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
clusterAnnotations: map[string]string{
|
||||
serviceAnnotations: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
|
||||
},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
},
|
||||
},
|
||||
{
|
||||
about: "Replica with operator annotations do not override external-dns annotations",
|
||||
about: "Replica with cluster name teamId prefix enabled",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
clusterAnnotations: make(map[string]string),
|
||||
operatorAnnotations: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "wrong.external-dns-name.example.com",
|
||||
enableTeamIdClusterPrefix: true,
|
||||
serviceAnnotations: make(map[string]string),
|
||||
operatorAnnotations: make(map[string]string),
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
},
|
||||
},
|
||||
{
|
||||
about: "Replica with replica service annotations override service annotations",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: true,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: make(map[string]string),
|
||||
serviceAnnotations: map[string]string{
|
||||
"service.beta.kubernetes.io/aws-load-balancer-nlb-target-type": "ip",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "1800",
|
||||
},
|
||||
replicaServiceAnnotations: map[string]string{
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "2000",
|
||||
},
|
||||
expect: map[string]string{
|
||||
"external-dns.alpha.kubernetes.io/hostname": "test-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "3600",
|
||||
"external-dns.alpha.kubernetes.io/hostname": "acid-test-stg-repl.test.db.example.com,test-stg-repl.acid.db.example.com",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-nlb-target-type": "ip",
|
||||
"service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout": "2000",
|
||||
},
|
||||
},
|
||||
// COMMON
|
||||
|
|
@ -713,32 +844,40 @@ func TestServiceAnnotations(t *testing.T) {
|
|||
about: "cluster annotations append to operator annotations",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: false,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: map[string]string{"foo": "bar"},
|
||||
clusterAnnotations: map[string]string{"post": "gres"},
|
||||
serviceAnnotations: map[string]string{"post": "gres"},
|
||||
expect: map[string]string{"foo": "bar", "post": "gres"},
|
||||
},
|
||||
{
|
||||
about: "cluster annotations override operator annotations",
|
||||
role: "replica",
|
||||
enableReplicaLoadBalancerOC: false,
|
||||
enableTeamIdClusterPrefix: false,
|
||||
operatorAnnotations: map[string]string{"foo": "bar", "post": "gres"},
|
||||
clusterAnnotations: map[string]string{"post": "greSQL"},
|
||||
serviceAnnotations: map[string]string{"post": "greSQL"},
|
||||
expect: map[string]string{"foo": "bar", "post": "greSQL"},
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.about, func(t *testing.T) {
|
||||
cl.OpConfig.EnableTeamIdClusternamePrefix = tt.enableTeamIdClusterPrefix
|
||||
|
||||
cl.OpConfig.CustomServiceAnnotations = tt.operatorAnnotations
|
||||
cl.OpConfig.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerOC
|
||||
cl.OpConfig.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerOC
|
||||
cl.OpConfig.MasterDNSNameFormat = "{cluster}.{team}.{hostedzone}"
|
||||
cl.OpConfig.ReplicaDNSNameFormat = "{cluster}-repl.{team}.{hostedzone}"
|
||||
cl.OpConfig.MasterDNSNameFormat = "{cluster}-stg.{namespace}.{hostedzone}"
|
||||
cl.OpConfig.MasterLegacyDNSNameFormat = "{cluster}-stg.{team}.{hostedzone}"
|
||||
cl.OpConfig.ReplicaDNSNameFormat = "{cluster}-stg-repl.{namespace}.{hostedzone}"
|
||||
cl.OpConfig.ReplicaLegacyDNSNameFormat = "{cluster}-stg-repl.{team}.{hostedzone}"
|
||||
cl.OpConfig.DbHostedZone = "db.example.com"
|
||||
|
||||
cl.Postgresql.Spec.ClusterName = "test"
|
||||
cl.Postgresql.Spec.ClusterName = ""
|
||||
cl.Postgresql.Spec.TeamID = "acid"
|
||||
cl.Postgresql.Spec.ServiceAnnotations = tt.clusterAnnotations
|
||||
cl.Postgresql.Spec.ServiceAnnotations = tt.serviceAnnotations
|
||||
cl.Postgresql.Spec.MasterServiceAnnotations = tt.masterServiceAnnotations
|
||||
cl.Postgresql.Spec.ReplicaServiceAnnotations = tt.replicaServiceAnnotations
|
||||
cl.Postgresql.Spec.EnableMasterLoadBalancer = tt.enableMasterLoadBalancerSpec
|
||||
cl.Postgresql.Spec.EnableReplicaLoadBalancer = tt.enableReplicaLoadBalancerSpec
|
||||
|
||||
|
|
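The DNS switch above is the crux of these fixtures: the new namespace-based format and the legacy team-based format are both expanded, and external-dns receives the two hostnames as one comma-separated annotation value. A minimal, self-contained sketch of that expansion — formatDNSName is a hypothetical helper, and the assumption (taken from the fixtures) is that the legacy template is fed the cluster name without the teamId prefix:

    package main

    import (
        "fmt"
        "strings"
    )

    // formatDNSName substitutes {placeholder} tokens in a DNS name format.
    // Hypothetical stand-in for the operator's own template expansion.
    func formatDNSName(format string, repl map[string]string) string {
        for k, v := range repl {
            format = strings.ReplaceAll(format, "{"+k+"}", v)
        }
        return format
    }

    func main() {
        newName := formatDNSName("{cluster}-stg.{namespace}.{hostedzone}",
            map[string]string{"cluster": "acid-test", "namespace": "test", "hostedzone": "db.example.com"})
        legacyName := formatDNSName("{cluster}-stg.{team}.{hostedzone}",
            map[string]string{"cluster": "test", "team": "acid", "hostedzone": "db.example.com"})

        // external-dns accepts a comma-separated hostname list in one annotation.
        fmt.Println(newName + "," + legacyName)
        // acid-test-stg.test.db.example.com,test-stg.acid.db.example.com
    }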
@@ -757,19 +896,20 @@ func TestServiceAnnotations(t *testing.T) {
}

func TestInitSystemUsers(t *testing.T) {
    testName := "Test system users initialization"

    // default cluster without connection pooler
    // default cluster without connection pooler and event streams
    cl.initSystemUsers()
    if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; exist {
        t.Errorf("%s, connection pooler user is present", testName)
        t.Errorf("%s, connection pooler user is present", t.Name())
    }
    if _, exist := cl.systemUsers[constants.EventStreamUserKeyName]; exist {
        t.Errorf("%s, stream user is present", t.Name())
    }

    // cluster with connection pooler
    cl.Spec.EnableConnectionPooler = boolToPointer(true)
    cl.initSystemUsers()
    if _, exist := cl.systemUsers[constants.ConnectionPoolerUserKeyName]; !exist {
        t.Errorf("%s, connection pooler user is not present", testName)
        t.Errorf("%s, connection pooler user is not present", t.Name())
    }

    // superuser is not allowed as connection pool user

@@ -780,47 +920,70 @@ func TestInitSystemUsers(t *testing.T) {
    cl.OpConfig.ConnectionPooler.User = "pooler"

    cl.initSystemUsers()
    if _, exist := cl.pgUsers["pooler"]; !exist {
        t.Errorf("%s, Superuser is not allowed to be a connection pool user", testName)
    if _, exist := cl.systemUsers["pooler"]; !exist {
        t.Errorf("%s, Superuser is not allowed to be a connection pool user", t.Name())
    }

    // neither protected users are
    delete(cl.pgUsers, "pooler")
    delete(cl.systemUsers, "pooler")
    cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
        User: "admin",
    }
    cl.OpConfig.ProtectedRoles = []string{"admin"}

    cl.initSystemUsers()
    if _, exist := cl.pgUsers["pooler"]; !exist {
        t.Errorf("%s, Protected user are not allowed to be a connection pool user", testName)
    if _, exist := cl.systemUsers["pooler"]; !exist {
        t.Errorf("%s, Protected user are not allowed to be a connection pool user", t.Name())
    }

    delete(cl.pgUsers, "pooler")
    delete(cl.systemUsers, "pooler")
    cl.Spec.ConnectionPooler = &acidv1.ConnectionPooler{
        User: "standby",
    }

    cl.initSystemUsers()
    if _, exist := cl.pgUsers["pooler"]; !exist {
        t.Errorf("%s, System users are not allowed to be a connection pool user", testName)
    if _, exist := cl.systemUsers["pooler"]; !exist {
        t.Errorf("%s, System users are not allowed to be a connection pool user", t.Name())
    }

    // using stream user in manifest but no streams defined should be treated like normal robot user
    streamUser := fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix)
    cl.Spec.Users = map[string]acidv1.UserFlags{streamUser: []string{}}
    cl.initSystemUsers()
    if _, exist := cl.systemUsers[constants.EventStreamUserKeyName]; exist {
        t.Errorf("%s, stream user is present", t.Name())
    }

    // cluster with streams
    cl.Spec.Streams = []acidv1.Stream{
        {
            ApplicationId: "test-app",
            Database:      "test_db",
            Tables: map[string]acidv1.StreamTable{
                "data.test_table": {
                    EventType: "test_event",
                },
            },
        },
    }
    cl.initSystemUsers()
    if _, exist := cl.systemUsers[constants.EventStreamUserKeyName]; !exist {
        t.Errorf("%s, stream user is not present", t.Name())
    }
}

func TestPreparedDatabases(t *testing.T) {
    testName := "TestDefaultPreparedDatabase"

    cl.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{}
    cl.initPreparedDatabaseRoles()

    for _, role := range []string{"acid_test_owner", "acid_test_reader", "acid_test_writer",
        "acid_test_data_owner", "acid_test_data_reader", "acid_test_data_writer"} {
        if _, exist := cl.pgUsers[role]; !exist {
            t.Errorf("%s, default role %q for prepared database not present", testName, role)
            t.Errorf("%s, default role %q for prepared database not present", t.Name(), role)
        }
    }

    testName = "TestPreparedDatabaseWithSchema"
    testName := "TestPreparedDatabaseWithSchema"

    cl.Spec.PreparedDatabases = map[string]acidv1.PreparedDatabase{
        "foo": {
@@ -1058,7 +1221,6 @@ func newService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
}

func TestCompareServices(t *testing.T) {
    testName := "TestCompareServices"
    cluster := Cluster{
        Config: Config{
            OpConfig: config.Config{

@@ -1359,16 +1521,16 @@ func TestCompareServices(t *testing.T) {
            match, reason := cluster.compareServices(tt.current, tt.new)
            if match && !tt.match {
                t.Logf("match=%v current=%v, old=%v reason=%s", match, tt.current.Annotations, tt.new.Annotations, reason)
                t.Errorf("%s - expected services to do not match: %q and %q", testName, tt.current, tt.new)
                t.Errorf("%s - expected services to do not match: %q and %q", t.Name(), tt.current, tt.new)
                return
            }
            if !match && tt.match {
                t.Errorf("%s - expected services to be the same: %q and %q", testName, tt.current, tt.new)
                t.Errorf("%s - expected services to be the same: %q and %q", t.Name(), tt.current, tt.new)
                return
            }
            if !match && !tt.match {
                if !strings.HasPrefix(reason, tt.reason) {
                    t.Errorf("%s - expected reason prefix %s, found %s", testName, tt.reason, reason)
                    t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason)
                    return
                }
            }

@@ -3,7 +3,9 @@ package cluster
import (
    "context"
    "fmt"
    "path/filepath"
    "strings"
    "time"

    "github.com/r3labs/diff"
    "github.com/sirupsen/logrus"

@@ -20,6 +22,7 @@ import (
    "github.com/zalando/postgres-operator/pkg/util/config"
    "github.com/zalando/postgres-operator/pkg/util/constants"
    "github.com/zalando/postgres-operator/pkg/util/k8sutil"
    "github.com/zalando/postgres-operator/pkg/util/retryutil"
)

// ConnectionPoolerObjects - K8s objects that belong to a connection pooler

@@ -42,9 +45,9 @@ type ConnectionPoolerObjects struct {
}

func (c *Cluster) connectionPoolerName(role PostgresRole) string {
    name := c.Name + "-pooler"
    name := fmt.Sprintf("%s-%s", c.Name, constants.ConnectionPoolerResourceSuffix)
    if role == Replica {
        name = name + "-repl"
        name = fmt.Sprintf("%s-%s", name, "repl")
    }
    return name
}

@@ -73,27 +76,67 @@ func needReplicaConnectionPoolerWorker(spec *acidv1.PostgresSpec) bool {
        *spec.EnableReplicaConnectionPooler
}

func (c *Cluster) needConnectionPoolerUser(oldSpec, newSpec *acidv1.PostgresSpec) bool {
    // return true if the pooler is needed AND was not disabled before, OR the user name differs
    return (needMasterConnectionPoolerWorker(newSpec) || needReplicaConnectionPoolerWorker(newSpec)) &&
        ((!needMasterConnectionPoolerWorker(oldSpec) &&
            !needReplicaConnectionPoolerWorker(oldSpec)) ||
            c.poolerUser(oldSpec) != c.poolerUser(newSpec))
}

func (c *Cluster) poolerUser(spec *acidv1.PostgresSpec) string {
    connectionPoolerSpec := spec.ConnectionPooler
    if connectionPoolerSpec == nil {
        connectionPoolerSpec = &acidv1.ConnectionPooler{}
    }
    // Using the superuser as the pooler user is not a good idea: it is
    // not going to be synced correctly with the current implementation,
    // and it is bad practice in any case.
    username := c.OpConfig.ConnectionPooler.User

    isSuperUser := connectionPoolerSpec.User == c.OpConfig.SuperUsername
    isProtectedUser := c.shouldAvoidProtectedOrSystemRole(
        connectionPoolerSpec.User, "connection pool role")

    if !isSuperUser && !isProtectedUser {
        username = util.Coalesce(
            connectionPoolerSpec.User,
            c.OpConfig.ConnectionPooler.User)
    }

    return username
}
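The fallback chain in poolerUser is essentially a first-non-empty choice over the manifest value and the operator default, guarded by the superuser and protected-role checks. A hedged sketch of the same precedence rule, with a local stand-in for util.Coalesce:

    package main

    import "fmt"

    // coalesce returns the first non-empty string, mirroring util.Coalesce.
    func coalesce(vals ...string) string {
        for _, v := range vals {
            if v != "" {
                return v
            }
        }
        return ""
    }

    func main() {
        operatorDefault := "pooler"

        manifestUser := "" // nothing set in the Postgres manifest
        fmt.Println(coalesce(manifestUser, operatorDefault)) // "pooler"

        manifestUser = "app_pooler" // the manifest value wins when present
        fmt.Println(coalesce(manifestUser, operatorDefault)) // "app_pooler"
    }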
// when listing pooler k8s objects
func (c *Cluster) poolerLabelsSet(addExtraLabels bool) labels.Set {
    poolerLabels := c.labelsSet(addExtraLabels)

    // TODO should be config values
    poolerLabels["application"] = "db-connection-pooler"

    return poolerLabels
}

// Return the connection pooler labels selector, which should on the one hand
// inherit most of the labels from the cluster itself, but at the same time
// have e.g. a different `application` label, so that the recreatePod operation
// does not interfere with it (it lists all the pods via labels, and if there
// were no difference, it would recreate the pooler pods as well).
func (c *Cluster) connectionPoolerLabels(role PostgresRole, addExtraLabels bool) *metav1.LabelSelector {
    poolerLabels := c.labelsSet(addExtraLabels)
    poolerLabelsSet := c.poolerLabelsSet(addExtraLabels)

    // TODO should be config values
    poolerLabels["application"] = "db-connection-pooler"
    poolerLabels["connection-pooler"] = c.connectionPoolerName(role)
    poolerLabelsSet["connection-pooler"] = c.connectionPoolerName(role)

    if addExtraLabels {
        extraLabels := map[string]string{}
        extraLabels[c.OpConfig.PodRoleLabel] = string(role)

        poolerLabels = labels.Merge(poolerLabels, extraLabels)
        poolerLabelsSet = labels.Merge(poolerLabelsSet, extraLabels)
    }

    return &metav1.LabelSelector{
        MatchLabels:      poolerLabels,
        MatchLabels:      poolerLabelsSet,
        MatchExpressions: nil,
    }
}
@@ -120,24 +163,27 @@ func (c *Cluster) createConnectionPooler(LookupFunction InstallFunction) (SyncReason, error) {
    return reason, nil
}

// Generate pool size related environment variables.
//
// MAX_DB_CONN would specify the global maximum for connections to a target
// database.
//
// MAX_CLIENT_CONN is not configurable at the moment, just set it high enough.
//
// DEFAULT_SIZE is a pool size per db/user (having in mind the use case when
// most of the queries coming through a connection pooler are from the same
// user to the same db). In case we want to spin up more connection pooler
// instances, take this into account and maintain the same total number of
// connections.
//
// MIN_SIZE is a pool's minimal size, to prevent a situation where a sudden
// workload has to wait for new connections to be spun up.
//
// RESERVE_SIZE is how many additional connections to allow for a pooler.
func (c *Cluster) getConnectionPoolerEnvVars() []v1.EnvVar {
    spec := &c.Spec
    connectionPoolerSpec := spec.ConnectionPooler
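As a rough illustration of the sizing rule this comment describes: with N pooler instances, the per-instance DEFAULT_SIZE should shrink so that the aggregate number of server connections stays constant. A sketch under that assumption — the function and variable names are illustrative, not the operator's:

    package main

    import "fmt"

    // effectivePoolSize keeps the aggregate server-connection count constant
    // when the number of pooler instances changes. Illustrative only.
    func effectivePoolSize(totalConns, instances int) int {
        if instances < 1 {
            instances = 1
        }
        return totalConns / instances
    }

    func main() {
        fmt.Println(effectivePoolSize(60, 2)) // DEFAULT_SIZE per instance: 30
        fmt.Println(effectivePoolSize(60, 3)) // DEFAULT_SIZE per instance: 20
    }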
@@ -281,9 +327,8 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
                Protocol: v1.ProtocolTCP,
            },
        },
        Env: envVars,
        ReadinessProbe: &v1.Probe{
            Handler: v1.Handler{
            ProbeHandler: v1.ProbeHandler{
                TCPSocket: &v1.TCPSocketAction{
                    Port: intstr.IntOrString{IntVal: pgPort},
                },

@@ -294,6 +339,53 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
        },
    }

    // If the cluster has custom TLS certificates configured, we do the following:
    // 1. Add environment variables to tell pgBouncer where to find the TLS certificates
    // 2. Reference the secret in a volume
    // 3. Mount the volume to the container at /tls
    var poolerVolumes []v1.Volume
    if spec.TLS != nil && spec.TLS.SecretName != "" {
        // Env vars
        crtFile := spec.TLS.CertificateFile
        keyFile := spec.TLS.PrivateKeyFile
        if crtFile == "" {
            crtFile = "tls.crt"
        }
        if keyFile == "" {
            keyFile = "tls.key" // fall back to the conventional secret key name
        }

        envVars = append(
            envVars,
            v1.EnvVar{
                Name: "CONNECTION_POOLER_CLIENT_TLS_CRT", Value: filepath.Join("/tls", crtFile),
            },
            v1.EnvVar{
                Name: "CONNECTION_POOLER_CLIENT_TLS_KEY", Value: filepath.Join("/tls", keyFile),
            },
        )

        // Volume
        mode := int32(0640)
        volume := v1.Volume{
            Name: "tls",
            VolumeSource: v1.VolumeSource{
                Secret: &v1.SecretVolumeSource{
                    SecretName:  spec.TLS.SecretName,
                    DefaultMode: &mode,
                },
            },
        }
        poolerVolumes = append(poolerVolumes, volume)

        // Mount
        poolerContainer.VolumeMounts = []v1.VolumeMount{{
            Name:      "tls",
            MountPath: "/tls",
        }}
    }

    poolerContainer.Env = envVars
    tolerationsSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)

    podTemplate := &v1.PodTemplateSpec{
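For context, the three steps above act on the TLS section of the cluster manifest. A hedged, self-contained sketch of the inputs and the paths the pooler container ends up with, assuming the conventional tls.crt/tls.key secret keys when the manifest leaves the file names empty (the struct below mirrors the relevant manifest fields, it is not the operator's actual acidv1 type):

    package main

    import (
        "fmt"
        "path/filepath"
    )

    // TLSDescription is a sketch of the manifest's TLS section.
    type TLSDescription struct {
        SecretName      string
        CertificateFile string
        PrivateKeyFile  string
    }

    func main() {
        tls := TLSDescription{SecretName: "pg-tls"} // file names left empty
        crt, key := tls.CertificateFile, tls.PrivateKeyFile
        if crt == "" {
            crt = "tls.crt"
        }
        if key == "" {
            key = "tls.key"
        }
        fmt.Println(filepath.Join("/tls", crt)) // CONNECTION_POOLER_CLIENT_TLS_CRT
        fmt.Println(filepath.Join("/tls", key)) // CONNECTION_POOLER_CLIENT_TLS_KEY
    }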
@@ -306,13 +398,20 @@ func (c *Cluster) generateConnectionPoolerPodTemplate(role PostgresRole) (
            TerminationGracePeriodSeconds: &gracePeriod,
            Containers:                    []v1.Container{poolerContainer},
            Tolerations:                   tolerationsSpec,
            Volumes:                       poolerVolumes,
        },
    }

    nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, spec.NodeAffinity)
    if c.OpConfig.EnablePodAntiAffinity {
        labelsSet := labels.Set(c.connectionPoolerLabels(role, false).MatchLabels)
        podTemplate.Spec.Affinity = generatePodAffinity(labelsSet, c.OpConfig.PodAntiAffinityTopologyKey, nodeAffinity)
        podTemplate.Spec.Affinity = podAffinity(
            labelsSet,
            c.OpConfig.PodAntiAffinityTopologyKey,
            nodeAffinity,
            c.OpConfig.PodAntiAffinityPreferredDuringScheduling,
            true,
        )
    } else if nodeAffinity != nil {
        podTemplate.Spec.Affinity = nodeAffinity
    }
@@ -379,23 +478,23 @@ func (c *Cluster) generateConnectionPoolerDeployment(connectionPooler *ConnectionPoolerObjects) (
}

func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {

    spec := &c.Spec
    poolerRole := connectionPooler.Role
    serviceSpec := v1.ServiceSpec{
        Ports: []v1.ServicePort{
            {
                Name:       connectionPooler.Name,
                Port:       pgPort,
                TargetPort: intstr.IntOrString{IntVal: c.servicePort(connectionPooler.Role)},
                TargetPort: intstr.IntOrString{IntVal: c.servicePort(poolerRole)},
            },
        },
        Type: v1.ServiceTypeClusterIP,
        Selector: map[string]string{
            "connection-pooler": c.connectionPoolerName(connectionPooler.Role),
            "connection-pooler": c.connectionPoolerName(poolerRole),
        },
    }

    if c.shouldCreateLoadBalancerForPoolerService(connectionPooler.Role, spec) {
    if c.shouldCreateLoadBalancerForPoolerService(poolerRole, spec) {
        c.configureLoadBalanceService(&serviceSpec, spec.AllowedSourceRanges)
    }

@@ -404,7 +503,7 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {
            Name:        connectionPooler.Name,
            Namespace:   connectionPooler.Namespace,
            Labels:      c.connectionPoolerLabels(connectionPooler.Role, false).MatchLabels,
            Annotations: c.annotationsSet(c.generateServiceAnnotations(connectionPooler.Role, spec)),
            Annotations: c.annotationsSet(c.generatePoolerServiceAnnotations(poolerRole, spec)),
            // make StatefulSet object its owner to represent the dependency.
            // By itself StatefulSet is being deleted with "Orphaned"
            // propagation policy, which means that its deletion will not

@@ -419,6 +518,32 @@ func (c *Cluster) generateConnectionPoolerService(connectionPooler *ConnectionPoolerObjects) *v1.Service {
    return service
}

func (c *Cluster) generatePoolerServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
    var dnsString string
    annotations := c.getCustomServiceAnnotations(role, spec)

    if c.shouldCreateLoadBalancerForPoolerService(role, spec) {
        // set the ELB timeout annotation with its default value
        if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
            annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
        }
        // the -repl suffix will be added by replicaDNSName
        clusterNameWithPoolerSuffix := c.connectionPoolerName(Master)
        if role == Master {
            dnsString = c.masterDNSName(clusterNameWithPoolerSuffix)
        } else {
            dnsString = c.replicaDNSName(clusterNameWithPoolerSuffix)
        }
        annotations[constants.ZalandoDNSNameAnnotation] = dnsString
    }

    if len(annotations) == 0 {
        return nil
    }

    return annotations
}

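Putting generatePoolerServiceAnnotations together: custom annotations are merged first, then the ELB timeout default and the DNS annotation are layered on top when a load balancer is requested for the pooler service. A hedged sketch of the resulting map for a master pooler — the annotation keys are the ones used elsewhere in this diff, while the DNS value depends entirely on the configured templates and is assumed here:

    package main

    import "fmt"

    func main() {
        // custom annotations from operator config / manifest come first
        annotations := map[string]string{"foo": "bar"}
        // the ELB timeout default is only set when not already present
        if _, ok := annotations["service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout"]; !ok {
            annotations["service.beta.kubernetes.io/aws-load-balancer-connection-idle-timeout"] = "3600"
        }
        // DNS value shown with an assumed template expansion
        annotations["external-dns.alpha.kubernetes.io/hostname"] = "acid-test-pooler.test.db.example.com"
        fmt.Println(annotations)
    }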
func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, spec *acidv1.PostgresSpec) bool {

    switch role {
@@ -442,6 +567,14 @@ func (c *Cluster) shouldCreateLoadBalancerForPoolerService(role PostgresRole, spec *acidv1.PostgresSpec) bool {
    }
}

func (c *Cluster) listPoolerPods(listOptions metav1.ListOptions) ([]v1.Pod, error) {
    pods, err := c.KubeClient.Pods(c.Namespace).List(context.TODO(), listOptions)
    if err != nil {
        return nil, fmt.Errorf("could not get list of pooler pods: %v", err)
    }
    return pods.Items, nil
}

// delete connection pooler
func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
    c.logger.Infof("deleting connection pooler spilo-role=%s", role)

@@ -820,6 +953,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) (
    var (
        deployment    *appsv1.Deployment
        newDeployment *appsv1.Deployment
        pods          []v1.Pod
        service       *v1.Service
        newService    *v1.Service
        err           error

@@ -909,6 +1043,34 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql) (
        c.ConnectionPooler[role].Deployment = deployment
    }

    // check if pooler pods must be replaced due to secret update
    listOptions := metav1.ListOptions{
        LabelSelector: labels.Set(c.connectionPoolerLabels(role, true).MatchLabels).String(),
    }
    pods, err = c.listPoolerPods(listOptions)
    if err != nil {
        return nil, err
    }
    for i, pod := range pods {
        if c.getRollingUpdateFlagFromPod(&pod) {
            podName := util.NameFromMeta(pods[i].ObjectMeta)
            err := retryutil.Retry(1*time.Second, 5*time.Second,
                func() (bool, error) {
                    err2 := c.KubeClient.Pods(podName.Namespace).Delete(
                        context.TODO(),
                        podName.Name,
                        c.deleteOptions)
                    if err2 != nil {
                        return false, err2
                    }
                    return true, nil
                })
            if err != nil {
                return nil, fmt.Errorf("could not delete pooler pod: %v", err)
            }
        }
    }

    if service, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), c.connectionPoolerName(role), metav1.GetOptions{}); err == nil {
        c.ConnectionPooler[role].Service = service
        desiredSvc := c.generateConnectionPoolerService(c.ConnectionPooler[role])

@@ -263,6 +263,7 @@ func TestConnectionPoolerCreateDeletion(t *testing.T) {
    client := k8sutil.KubernetesClient{
        StatefulSetsGetter: clientSet.AppsV1(),
        ServicesGetter:     clientSet.CoreV1(),
        PodsGetter:         clientSet.CoreV1(),
        DeploymentsGetter:  clientSet.AppsV1(),
        PostgresqlsGetter:  acidClientSet.AcidV1(),
        SecretsGetter:      clientSet.CoreV1(),

@@ -372,6 +373,7 @@ func TestConnectionPoolerSync(t *testing.T) {
    client := k8sutil.KubernetesClient{
        StatefulSetsGetter: clientSet.AppsV1(),
        ServicesGetter:     clientSet.CoreV1(),
        PodsGetter:         clientSet.CoreV1(),
        DeploymentsGetter:  clientSet.AppsV1(),
        PostgresqlsGetter:  acidClientSet.AcidV1(),
        SecretsGetter:      clientSet.CoreV1(),

@@ -48,7 +48,7 @@ const (

    getPublicationsSQL = `SELECT p.pubname, string_agg(pt.schemaname || '.' || pt.tablename, ', ' ORDER BY pt.schemaname, pt.tablename)
     FROM pg_publication p
     JOIN pg_publication_tables pt ON pt.pubname = p.pubname
     LEFT JOIN pg_publication_tables pt ON pt.pubname = p.pubname
     GROUP BY p.pubname;`
    createPublicationSQL = `CREATE PUBLICATION "%s" FOR TABLE %s WITH (publish = 'insert, update');`
    alterPublicationSQL  = `ALTER PUBLICATION "%s" SET TABLE %s;`
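The switch to a LEFT JOIN keeps publications that currently have no tables in the result set, so their aggregated table list comes back as NULL. Any code scanning those rows therefore needs a NULL-tolerant type. A minimal sketch under that assumption — the connection string and driver are hypothetical; the operator uses its own database plumbing:

    package main

    import (
        "database/sql"
        "fmt"
        "log"

        _ "github.com/lib/pq" // assumed driver for the sketch
    )

    func main() {
        db, err := sql.Open("postgres", "host=localhost dbname=test_db sslmode=disable")
        if err != nil {
            log.Fatal(err)
        }
        defer db.Close()

        rows, err := db.Query(`SELECT p.pubname,
                string_agg(pt.schemaname || '.' || pt.tablename, ', ')
            FROM pg_publication p
            LEFT JOIN pg_publication_tables pt ON pt.pubname = p.pubname
            GROUP BY p.pubname`)
        if err != nil {
            log.Fatal(err)
        }
        defer rows.Close()

        for rows.Next() {
            var pub string
            var tables sql.NullString // NULL when the publication has no tables
            if err := rows.Scan(&pub, &tables); err != nil {
                log.Fatal(err)
            }
            fmt.Println(pub, tables.String)
        }
    }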
@@ -231,7 +231,8 @@ func (c *Cluster) readPgUsersFromDatabase(userNames []string) (users spec.PgUserMap) {
            parameters[fields[0]] = fields[1]
        }

        if strings.HasSuffix(rolname, c.OpConfig.RoleDeletionSuffix) {
        // consider NOLOGIN roles with deleted suffix as deprecated users
        if strings.HasSuffix(rolname, c.OpConfig.RoleDeletionSuffix) && !rolcanlogin {
            roldeleted = true
        }

@@ -283,7 +284,7 @@ func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error {
    retentionDate := time.Now().AddDate(0, 0, int(retenionDays)*-1)

    for rotatedUser, dateSuffix := range extraUsers {
        userCreationDate, err := time.Parse("060102", dateSuffix)
        userCreationDate, err := time.Parse(constants.RotationUserDateFormat, dateSuffix)
        if err != nil {
            c.logger.Errorf("could not parse creation date suffix of user %q: %v", rotatedUser, err)
            continue

@@ -13,7 +13,7 @@ import (

    appsv1 "k8s.io/api/apps/v1"
    v1 "k8s.io/api/core/v1"
    policybeta1 "k8s.io/api/policy/v1beta1"
    policyv1 "k8s.io/api/policy/v1"
    apierrors "k8s.io/apimachinery/pkg/api/errors"
    "k8s.io/apimachinery/pkg/api/resource"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -28,8 +28,8 @@ import (
    "github.com/zalando/postgres-operator/pkg/util/k8sutil"
    "github.com/zalando/postgres-operator/pkg/util/patroni"
    "github.com/zalando/postgres-operator/pkg/util/retryutil"
    "golang.org/x/exp/maps"
    batchv1 "k8s.io/api/batch/v1"
    batchv1beta1 "k8s.io/api/batch/v1beta1"
    "k8s.io/apimachinery/pkg/labels"
)

@@ -60,6 +60,7 @@ type patroniDCS struct {
    SynchronousNodeCount     uint32                       `json:"synchronous_node_count,omitempty"`
    PGBootstrapConfiguration map[string]interface{}       `json:"postgresql,omitempty"`
    Slots                    map[string]map[string]string `json:"slots,omitempty"`
    FailsafeMode             *bool                        `json:"failsafe_mode,omitempty"`
}

type pgBootstrap struct {
@@ -80,7 +81,7 @@ func (c *Cluster) statefulSetName() string {
func (c *Cluster) endpointName(role PostgresRole) string {
    name := c.Name
    if role == Replica {
        name = name + "-repl"
        name = fmt.Sprintf("%s-%s", name, "repl")
    }

    return name

@@ -89,7 +90,7 @@ func (c *Cluster) endpointName(role PostgresRole) string {
func (c *Cluster) serviceName(role PostgresRole) string {
    name := c.Name
    if role == Replica {
        name = name + "-repl"
        name = fmt.Sprintf("%s-%s", name, "repl")
    }

    return name

@@ -102,8 +103,9 @@ func (c *Cluster) serviceAddress(role PostgresRole) string {
        return service.ObjectMeta.Name
    }

    c.logger.Warningf("No service for role %s", role)
    return ""
    defaultAddress := c.serviceName(role)
    c.logger.Warningf("No service for role %s - defaulting to %s", role, defaultAddress)
    return defaultAddress
}

func (c *Cluster) servicePort(role PostgresRole) int32 {
@@ -138,6 +140,23 @@ func makeDefaultResources(config *config.Config) acidv1.Resources {
    }
}

func makeLogicalBackupResources(config *config.Config) acidv1.Resources {

    logicalBackupResourceRequests := acidv1.ResourceDescription{
        CPU:    config.LogicalBackup.LogicalBackupCPURequest,
        Memory: config.LogicalBackup.LogicalBackupMemoryRequest,
    }
    logicalBackupResourceLimits := acidv1.ResourceDescription{
        CPU:    config.LogicalBackup.LogicalBackupCPULimit,
        Memory: config.LogicalBackup.LogicalBackupMemoryLimit,
    }

    return acidv1.Resources{
        ResourceRequests: logicalBackupResourceRequests,
        ResourceLimits:   logicalBackupResourceLimits,
    }
}

func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) error {
    var (
        isSmaller bool
@@ -183,6 +202,32 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) error {
    return nil
}

func (c *Cluster) enforceMaxResourceRequests(resources *v1.ResourceRequirements) error {
    var (
        err error
    )

    cpuRequest := resources.Requests[v1.ResourceCPU]
    maxCPURequest := c.OpConfig.MaxCPURequest
    maxCPU, err := util.MinResource(maxCPURequest, cpuRequest.String())
    if err != nil {
        return fmt.Errorf("could not compare defined CPU request %s for %q container with configured maximum value %s: %v",
            cpuRequest.String(), constants.PostgresContainerName, maxCPURequest, err)
    }
    resources.Requests[v1.ResourceCPU] = maxCPU

    memoryRequest := resources.Requests[v1.ResourceMemory]
    maxMemoryRequest := c.OpConfig.MaxMemoryRequest
    maxMemory, err := util.MinResource(maxMemoryRequest, memoryRequest.String())
    if err != nil {
        return fmt.Errorf("could not compare defined memory request %s for %q container with configured maximum value %s: %v",
            memoryRequest.String(), constants.PostgresContainerName, maxMemoryRequest, err)
    }
    resources.Requests[v1.ResourceMemory] = maxMemory

    return nil
}
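enforceMaxResourceRequests effectively clamps each request to min(request, configured maximum). A sketch of that semantics with Kubernetes resource quantities, using the apimachinery API directly (util.MinResource in the diff wraps a comparison like this):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/api/resource"
    )

    // minQuantity returns the smaller of two quantities - the comparison
    // that enforceMaxResourceRequests relies on.
    func minQuantity(a, b resource.Quantity) resource.Quantity {
        if a.Cmp(b) < 0 {
            return a
        }
        return b
    }

    func main() {
        request := resource.MustParse("2")    // CPU requested by the manifest
        maximum := resource.MustParse("500m") // operator-configured MaxCPURequest
        capped := minQuantity(maximum, request)
        fmt.Println(capped.String()) // 500m - the request is clamped down
    }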

func setMemoryRequestToLimit(resources *v1.ResourceRequirements, containerName string, logger *logrus.Entry) {

    requests := resources.Requests[v1.ResourceMemory]
@@ -260,10 +305,17 @@ func (c *Cluster) generateResourceRequirements(
        setMemoryRequestToLimit(&result, containerName, c.logger)
    }

    // enforce maximum cpu and memory requests for Postgres containers only
    if containerName == constants.PostgresContainerName {
        if err = c.enforceMaxResourceRequests(&result); err != nil {
            return nil, fmt.Errorf("could not enforce maximum resource requests: %v", err)
        }
    }

    return &result, nil
}

func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, pamRoleName string, EnablePgVersionEnvVar bool, logger *logrus.Entry) (string, error) {
func generateSpiloJSONConfiguration(pg *acidv1.PostgresqlParam, patroni *acidv1.Patroni, opConfig *config.Config, logger *logrus.Entry) (string, error) {
    config := spiloConfiguration{}

    config.Bootstrap = pgBootstrap{}
@@ -345,6 +397,11 @@ PatroniInitDBParams:
    if patroni.SynchronousNodeCount >= 1 {
        config.Bootstrap.DCS.SynchronousNodeCount = patroni.SynchronousNodeCount
    }
    if patroni.FailsafeMode != nil {
        config.Bootstrap.DCS.FailsafeMode = patroni.FailsafeMode
    } else if opConfig.EnablePatroniFailsafeMode != nil {
        config.Bootstrap.DCS.FailsafeMode = opConfig.EnablePatroniFailsafeMode
    }

    config.PgLocalConfiguration = make(map[string]interface{})

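With the new field in patroniDCS, a manifest-level failsafe_mode takes precedence and the operator-level default only applies when the manifest leaves it unset; either way the value lands under bootstrap.dcs in the Spilo configuration. A hedged sketch of the resulting JSON, with the struct trimmed to the relevant field:

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // dcs is a trimmed-down stand-in for the patroniDCS struct in this diff.
    type dcs struct {
        FailsafeMode *bool `json:"failsafe_mode,omitempty"`
    }

    func main() {
        enabled := true
        out, _ := json.Marshal(dcs{FailsafeMode: &enabled})
        fmt.Println(string(out)) // {"failsafe_mode":true}

        out, _ = json.Marshal(dcs{}) // unset -> the key is omitted entirely
        fmt.Println(string(out))     // {}
    }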
@@ -352,7 +409,7 @@ PatroniInitDBParams:
    // setting postgresql.bin_dir in the SPILO_CONFIGURATION still works and takes precedence over PGVERSION
    // so we add postgresql.bin_dir only if PGVERSION is unused
    // see PR 222 in Spilo
    if !EnablePgVersionEnvVar {
    if !opConfig.EnablePgVersionEnvVar {
        config.PgLocalConfiguration[patroniPGBinariesParameterName] = fmt.Sprintf(pgBinariesLocationTemplate, pg.PgVersion)
    }
    if len(pg.Parameters) > 0 {

@@ -374,7 +431,7 @@ PatroniInitDBParams:
    }

    config.Bootstrap.Users = map[string]pgUser{
        pamRoleName: {
        opConfig.PamRoleName: {
            Password: "",
            Options:  []string{constants.RoleFlagCreateDB, constants.RoleFlagNoLogin},
        },
@@ -456,17 +513,26 @@ func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinity
    }
}

func generatePodAffinity(labels labels.Set, topologyKey string, nodeAffinity *v1.Affinity) *v1.Affinity {
    // generate pod anti-affinity to avoid multiple pods of the same Postgres cluster in the same topology, e.g. node
    podAffinity := v1.Affinity{
        PodAntiAffinity: &v1.PodAntiAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{{
func podAffinity(
    labels labels.Set,
    topologyKey string,
    nodeAffinity *v1.Affinity,
    preferredDuringScheduling bool,
    anti bool) *v1.Affinity {

    var podAffinity v1.Affinity

    podAffinityTerm := v1.PodAffinityTerm{
        LabelSelector: &metav1.LabelSelector{
            MatchLabels: labels,
        },
        TopologyKey: topologyKey,
    }},
        },
    }

    if anti {
        podAffinity.PodAntiAffinity = generatePodAntiAffinity(podAffinityTerm, preferredDuringScheduling)
    } else {
        podAffinity.PodAffinity = generatePodAffinity(podAffinityTerm, preferredDuringScheduling)
    }

    if nodeAffinity != nil && nodeAffinity.NodeAffinity != nil {

@@ -476,6 +542,36 @@ func generatePodAffinity(labels labels.Set, topologyKey string, nodeAffinity *v1.Affinity) *v1.Affinity {
    return &podAffinity
}

func generatePodAffinity(podAffinityTerm v1.PodAffinityTerm, preferredDuringScheduling bool) *v1.PodAffinity {
    podAffinity := &v1.PodAffinity{}

    if preferredDuringScheduling {
        podAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.WeightedPodAffinityTerm{{
            Weight:          1,
            PodAffinityTerm: podAffinityTerm,
        }}
    } else {
        podAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []v1.PodAffinityTerm{podAffinityTerm}
    }

    return podAffinity
}

func generatePodAntiAffinity(podAffinityTerm v1.PodAffinityTerm, preferredDuringScheduling bool) *v1.PodAntiAffinity {
    podAntiAffinity := &v1.PodAntiAffinity{}

    if preferredDuringScheduling {
        podAntiAffinity.PreferredDuringSchedulingIgnoredDuringExecution = []v1.WeightedPodAffinityTerm{{
            Weight:          1,
            PodAffinityTerm: podAffinityTerm,
        }}
    } else {
        podAntiAffinity.RequiredDuringSchedulingIgnoredDuringExecution = []v1.PodAffinityTerm{podAffinityTerm}
    }

    return podAntiAffinity
}
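The key switch in this refactor is between required and preferred scheduling terms. A hedged sketch of the two shapes the new helpers can emit for anti-affinity, using the same core/v1 types as the diff: required terms hard-block co-scheduling, while preferred terms only weight the scheduler against it.

    package main

    import (
        v1 "k8s.io/api/core/v1"
        metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    )

    func main() {
        term := v1.PodAffinityTerm{
            LabelSelector: &metav1.LabelSelector{
                MatchLabels: map[string]string{"cluster-name": "acid-test"}, // assumed label
            },
            TopologyKey: "kubernetes.io/hostname",
        }

        // required: the scheduler refuses to co-locate matching pods
        required := &v1.PodAntiAffinity{
            RequiredDuringSchedulingIgnoredDuringExecution: []v1.PodAffinityTerm{term},
        }
        // preferred: co-location is merely penalised, not forbidden
        preferred := &v1.PodAntiAffinity{
            PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{
                {Weight: 1, PodAffinityTerm: term},
            },
        }
        _, _ = required, preferred
    }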

func tolerations(tolerationsSpec *[]v1.Toleration, podToleration map[string]string) []v1.Toleration {
    // allow to override tolerations by postgresql manifest
    if len(*tolerationsSpec) > 0 {
@@ -674,6 +770,7 @@ func (c *Cluster) generatePodTemplate(
    spiloContainer *v1.Container,
    initContainers []v1.Container,
    sidecarContainers []v1.Container,
    sharePgSocketWithSidecars *bool,
    tolerationsSpec *[]v1.Toleration,
    spiloRunAsUser *int64,
    spiloRunAsGroup *int64,

@@ -687,6 +784,7 @@ func (c *Cluster) generatePodTemplate(
    shmVolume *bool,
    podAntiAffinity bool,
    podAntiAffinityTopologyKey string,
    podAntiAffinityPreferredDuringScheduling bool,
    additionalSecretMount string,
    additionalSecretMountPath string,
    additionalVolumes []acidv1.AdditionalVolume,

@@ -727,7 +825,13 @@ func (c *Cluster) generatePodTemplate(
    }

    if podAntiAffinity {
        podSpec.Affinity = generatePodAffinity(labels, podAntiAffinityTopologyKey, nodeAffinity)
        podSpec.Affinity = podAffinity(
            labels,
            podAntiAffinityTopologyKey,
            nodeAffinity,
            podAntiAffinityPreferredDuringScheduling,
            true,
        )
    } else if nodeAffinity != nil {
        podSpec.Affinity = nodeAffinity
    }

@@ -736,6 +840,10 @@ func (c *Cluster) generatePodTemplate(
        podSpec.PriorityClassName = priorityClassName
    }

    if sharePgSocketWithSidecars != nil && *sharePgSocketWithSidecars {
        addVarRunVolume(&podSpec)
    }

    if additionalSecretMount != "" {
        addSecretVolume(&podSpec, additionalSecretMount, additionalSecretMountPath)
    }
@@ -764,10 +872,9 @@ func (c *Cluster) generatePodTemplate(

// generateSpiloPodEnvVars generates environment variables for the Spilo pod
func (c *Cluster) generateSpiloPodEnvVars(
    spec *acidv1.PostgresSpec,
    uid types.UID,
    spiloConfiguration string,
    cloneDescription *acidv1.CloneDescription,
    standbyDescription *acidv1.StandbyDescription) []v1.EnvVar {
    spiloConfiguration string) ([]v1.EnvVar, error) {

    // hard-coded set of environment variables we need
    // to guarantee core functionality of the operator

@@ -873,24 +980,24 @@ func (c *Cluster) generateSpiloPodEnvVars(
        envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_USE_CONFIGMAPS", Value: "true"})
    }

    if cloneDescription != nil && cloneDescription.ClusterName != "" {
        envVars = append(envVars, c.generateCloneEnvironment(cloneDescription)...)
    }

    if standbyDescription != nil {
        envVars = append(envVars, c.generateStandbyEnvironment(standbyDescription)...)
    }

    // fetch cluster-specific variables that will override all subsequent global variables
    if len(c.Spec.Env) > 0 {
        envVars = appendEnvVars(envVars, c.Spec.Env...)
    if len(spec.Env) > 0 {
        envVars = appendEnvVars(envVars, spec.Env...)
    }

    if spec.Clone != nil && spec.Clone.ClusterName != "" {
        envVars = append(envVars, c.generateCloneEnvironment(spec.Clone)...)
    }

    if spec.StandbyCluster != nil {
        envVars = append(envVars, c.generateStandbyEnvironment(spec.StandbyCluster)...)
    }

    // fetch variables from the custom environment Secret
    // that will override all subsequent global variables
    secretEnvVarsList, err := c.getPodEnvironmentSecretVariables()
    if err != nil {
        c.logger.Warningf("%v", err)
        return nil, err
    }
    envVars = appendEnvVars(envVars, secretEnvVarsList...)

@@ -898,7 +1005,7 @@ func (c *Cluster) generateSpiloPodEnvVars(
    // that will override all subsequent global variables
    configMapEnvVarsList, err := c.getPodEnvironmentConfigMapVariables()
    if err != nil {
        c.logger.Warningf("%v", err)
        return nil, err
    }
    envVars = appendEnvVars(envVars, configMapEnvVarsList...)

@@ -934,7 +1041,7 @@ func (c *Cluster) generateSpiloPodEnvVars(

    envVars = appendEnvVars(envVars, opConfigEnvVars...)

    return envVars
    return envVars, nil
}

func appendEnvVars(envs []v1.EnvVar, appEnv ...v1.EnvVar) []v1.EnvVar {
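The reordering in generateSpiloPodEnvVars works because of a strict precedence rule: earlier entries win, since appendEnvVars skips any name that is already set. A hedged sketch of that first-wins dedupe (mirroring, not copying, the helper in this file):

    package main

    import "fmt"

    type envVar struct{ Name, Value string }

    // appendIfAbsent mimics the first-wins behaviour of appendEnvVars:
    // later sources cannot override a name that is already set.
    func appendIfAbsent(envs []envVar, add ...envVar) []envVar {
        seen := map[string]bool{}
        for _, e := range envs {
            seen[e.Name] = true
        }
        for _, e := range add {
            if !seen[e.Name] {
                envs = append(envs, e)
                seen[e.Name] = true
            }
        }
        return envs
    }

    func main() {
        clusterEnv := []envVar{{"WAL_S3_BUCKET", "cluster-bucket"}}
        operatorEnv := []envVar{{"WAL_S3_BUCKET", "global-bucket"}, {"LOG_S3_BUCKET", "logs"}}
        fmt.Println(appendIfAbsent(clusterEnv, operatorEnv...))
        // [{WAL_S3_BUCKET cluster-bucket} {LOG_S3_BUCKET logs}]
    }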
@@ -1085,17 +1192,18 @@ func extractPgVersionFromBinPath(binPath string, template string) (string, error) {

func generateSpiloReadinessProbe() *v1.Probe {
    return &v1.Probe{
        Handler: v1.Handler{
        FailureThreshold: 3,
        ProbeHandler: v1.ProbeHandler{
            HTTPGet: &v1.HTTPGetAction{
                Path:   "/readiness",
                Port:   intstr.IntOrString{IntVal: patroni.ApiPort},
                Scheme: v1.URISchemeHTTP,
            },
        },
        InitialDelaySeconds: 6,
        PeriodSeconds:       10,
        TimeoutSeconds:      5,
        SuccessThreshold:    1,
        FailureThreshold:    3,
        TimeoutSeconds:      5,
    }
}

@@ -1146,17 +1254,16 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
        }
    }

    spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, c.OpConfig.PamRoleName, c.OpConfig.EnablePgVersionEnvVar, c.logger)
    spiloConfiguration, err := generateSpiloJSONConfiguration(&spec.PostgresqlParam, &spec.Patroni, &c.OpConfig, c.logger)
    if err != nil {
        return nil, fmt.Errorf("could not generate Spilo JSON configuration: %v", err)
    }

    // generate environment variables for the spilo container
    spiloEnvVars := c.generateSpiloPodEnvVars(
        c.Postgresql.GetUID(),
        spiloConfiguration,
        spec.Clone,
        spec.StandbyCluster)
    spiloEnvVars, err := c.generateSpiloPodEnvVars(spec, c.Postgresql.GetUID(), spiloConfiguration)
    if err != nil {
        return nil, fmt.Errorf("could not generate Spilo env vars: %v", err)
    }

    // pick up the docker image for the spilo container
    effectiveDockerImage := util.Coalesce(spec.DockerImage, c.OpConfig.DockerImage)

@@ -1246,7 +1353,9 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
    )

    // Patroni responds 200 to the probe only if it either owns the leader lock or postgres is running and DCS is accessible
    if c.OpConfig.EnableReadinessProbe {
        spiloContainer.ReadinessProbe = generateSpiloReadinessProbe()
    }

    // generate container specs for sidecars specified in the cluster manifest
    clusterSpecificSidecars := []v1.Container{}
@@ -1317,6 +1426,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
        spiloContainer,
        initContainers,
        sidecarContainers,
        c.OpConfig.SharePgSocketWithSidecars,
        &tolerationSpec,
        effectiveRunAsUser,
        effectiveRunAsGroup,

@@ -1330,6 +1440,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
        mountShmVolumeNeeded(c.OpConfig, spec),
        c.OpConfig.EnablePodAntiAffinity,
        c.OpConfig.PodAntiAffinityTopologyKey,
        c.OpConfig.PodAntiAffinityPreferredDuringScheduling,
        c.OpConfig.AdditionalSecretMount,
        c.OpConfig.AdditionalSecretMountPath,
        additionalVolumes)

@@ -1343,6 +1454,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.StatefulSet, error) {
        return nil, fmt.Errorf("could not generate volume claim template: %v", err)
    }

    // global minInstances and maxInstances settings can overwrite the manifest
    numberOfInstances := c.getNumberOfInstances(spec)

    // the operator has domain-specific logic on how to do rolling updates of PG clusters
@@ -1443,9 +1555,16 @@ func (c *Cluster) generateScalyrSidecarSpec(clusterName, APIKey, serverURL, dockerImage string,
func (c *Cluster) getNumberOfInstances(spec *acidv1.PostgresSpec) int32 {
    min := c.OpConfig.MinInstances
    max := c.OpConfig.MaxInstances
    instanceLimitAnnotationKey := c.OpConfig.IgnoreInstanceLimitsAnnotationKey
    cur := spec.NumberOfInstances
    newcur := cur

    if instanceLimitAnnotationKey != "" {
        if value, exists := c.ObjectMeta.Annotations[instanceLimitAnnotationKey]; exists && value == "true" {
            return cur
        }
    }

    if spec.StandbyCluster != nil {
        if newcur == 1 {
            min = newcur
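The new annotation check lets an individual cluster opt out of the operator-wide min/max instance clamping. A hedged sketch condensing that early-return logic — the annotation key shown is an assumed example, since the real key is operator configuration:

    package main

    import "fmt"

    // getNumberOfInstancesSketch: when the configured opt-out annotation is
    // present and "true", the manifest's replica count is returned untouched;
    // otherwise it is clamped to the operator's min/max bounds.
    func getNumberOfInstancesSketch(annotations map[string]string, key string, cur, min, max int32) int32 {
        if key != "" && annotations[key] == "true" {
            return cur
        }
        if max >= 0 && cur > max {
            cur = max
        }
        if min >= 0 && cur < min {
            cur = min
        }
        return cur
    }

    func main() {
        key := "acid.zalan.do/ignore-instance-limits" // assumed key
        ann := map[string]string{key: "true"}
        fmt.Println(getNumberOfInstancesSketch(ann, key, 1, 2, 5)) // 1 - limits ignored
        fmt.Println(getNumberOfInstancesSketch(nil, key, 1, 2, 5)) // 2 - clamped to min
    }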
@@ -1502,6 +1621,28 @@ func addShmVolume(podSpec *v1.PodSpec) {
    podSpec.Volumes = volumes
}

func addVarRunVolume(podSpec *v1.PodSpec) {
    volumes := append(podSpec.Volumes, v1.Volume{
        Name: "postgresql-run",
        VolumeSource: v1.VolumeSource{
            EmptyDir: &v1.EmptyDirVolumeSource{
                Medium: "Memory",
            },
        },
    })

    for i := range podSpec.Containers {
        mounts := append(podSpec.Containers[i].VolumeMounts,
            v1.VolumeMount{
                Name:      constants.RunVolumeName,
                MountPath: constants.RunVolumePath,
            })
        podSpec.Containers[i].VolumeMounts = mounts
    }

    podSpec.Volumes = volumes
}

func addSecretVolume(podSpec *v1.PodSpec, additionalSecretMount string, additionalSecretMountPath string) {
    volumes := append(podSpec.Volumes, v1.Volume{
        Name: additionalSecretMount,
@@ -1760,27 +1901,13 @@ func (c *Cluster) configureLoadBalanceService(serviceSpec *v1.ServiceSpec, sourceRanges []string) {
}

func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
    annotations := make(map[string]string)

    for k, v := range c.OpConfig.CustomServiceAnnotations {
        annotations[k] = v
    }
    if spec != nil || spec.ServiceAnnotations != nil {
        for k, v := range spec.ServiceAnnotations {
            annotations[k] = v
        }
    }
    annotations := c.getCustomServiceAnnotations(role, spec)

    if c.shouldCreateLoadBalancerForService(role, spec) {
        var dnsName string
        if role == Master {
            dnsName = c.masterDNSName()
        } else {
            dnsName = c.replicaDNSName()
        }
        dnsName := c.dnsName(role)

        // Just set ELB Timeout annotation with default value, if it does not
        // have a cutom value
        // have a custom value
        if _, ok := annotations[constants.ElbTimeoutAnnotationName]; !ok {
            annotations[constants.ElbTimeoutAnnotationName] = constants.ElbTimeoutAnnotationValue
        }
@@ -1795,6 +1922,24 @@ func (c *Cluster) generateServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
    return annotations
}

func (c *Cluster) getCustomServiceAnnotations(role PostgresRole, spec *acidv1.PostgresSpec) map[string]string {
    annotations := make(map[string]string)
    maps.Copy(annotations, c.OpConfig.CustomServiceAnnotations)

    if spec != nil {
        maps.Copy(annotations, spec.ServiceAnnotations)

        switch role {
        case Master:
            maps.Copy(annotations, spec.MasterServiceAnnotations)
        case Replica:
            maps.Copy(annotations, spec.ReplicaServiceAnnotations)
        }
    }

    return annotations
}
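getCustomServiceAnnotations relies on maps.Copy overwriting existing keys, so the merge order is the precedence order: operator config first, then spec-level annotations, then role-specific annotations win last. A small self-contained sketch of that layering:

    package main

    import (
        "fmt"

        "golang.org/x/exp/maps"
    )

    func main() {
        annotations := map[string]string{}
        maps.Copy(annotations, map[string]string{"timeout": "3600", "owner": "op"}) // operator config
        maps.Copy(annotations, map[string]string{"timeout": "1800"})                // spec.ServiceAnnotations
        maps.Copy(annotations, map[string]string{"timeout": "2000"})                // role-specific, wins last
        fmt.Println(annotations["timeout"], annotations["owner"])                   // 2000 op
    }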

func (c *Cluster) generateEndpoint(role PostgresRole, subsets []v1.EndpointSubset) *v1.Endpoints {
    endpoints := &v1.Endpoints{
        ObjectMeta: metav1.ObjectMeta{
@@ -1943,7 +2088,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescription) []v1.EnvVar {
    return result
}

func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget {
func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {
    minAvailable := intstr.FromInt(1)
    pdbEnabled := c.OpConfig.EnablePodDisruptionBudget

@@ -1952,14 +2097,14 @@ func (c *Cluster) generatePodDisruptionBudget() *policybeta1.PodDisruptionBudget {
        minAvailable = intstr.FromInt(0)
    }

    return &policybeta1.PodDisruptionBudget{
    return &policyv1.PodDisruptionBudget{
        ObjectMeta: metav1.ObjectMeta{
            Name:        c.podDisruptionBudgetName(),
            Namespace:   c.Namespace,
            Labels:      c.labelsSet(true),
            Annotations: c.annotationsSet(nil),
        },
        Spec: policybeta1.PodDisruptionBudgetSpec{
        Spec: policyv1.PodDisruptionBudgetSpec{
            MinAvailable: &minAvailable,
            Selector: &metav1.LabelSelector{
                MatchLabels: c.roleLabelsSet(false, Master),
@ -1977,7 +2122,7 @@ func (c *Cluster) getClusterServiceConnectionParameters(clusterName string) (hos
|
|||
return
|
||||
}
|
||||
|
||||
func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
||||
func (c *Cluster) generateLogicalBackupJob() (*batchv1.CronJob, error) {
|
||||
|
||||
var (
|
||||
err error
|
||||
|
|
@ -1989,9 +2134,12 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
|
||||
c.logger.Debug("Generating logical backup pod template")
|
||||
|
||||
// allocate for the backup pod the same amount of resources as for normal DB pods
|
||||
// allocate configured resources for logical backup pod
|
||||
logicalBackupResources := makeLogicalBackupResources(&c.OpConfig)
|
||||
// if not defined only default resources from spilo pods are used
|
||||
resourceRequirements, err = c.generateResourceRequirements(
|
||||
c.Spec.Resources, makeDefaultResources(&c.OpConfig), logicalBackupContainerName)
|
||||
&logicalBackupResources, makeDefaultResources(&c.OpConfig), logicalBackupContainerName)
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not generate resource requirements for logical backup pods: %v", err)
|
||||
}
|
||||
|
|
@ -2012,20 +2160,15 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
c.OpConfig.ClusterNameLabel: c.Name,
|
||||
"application": "spilo-logical-backup",
|
||||
}
|
||||
podAffinityTerm := v1.PodAffinityTerm{
|
||||
LabelSelector: &metav1.LabelSelector{
|
||||
MatchLabels: labels,
|
||||
},
|
||||
TopologyKey: "kubernetes.io/hostname",
|
||||
}
|
||||
podAffinity := v1.Affinity{
|
||||
PodAffinity: &v1.PodAffinity{
|
||||
PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{{
|
||||
Weight: 1,
|
||||
PodAffinityTerm: podAffinityTerm,
|
||||
},
|
||||
},
|
||||
}}
|
||||
|
||||
nodeAffinity := c.nodeAffinity(c.OpConfig.NodeReadinessLabel, nil)
|
||||
podAffinity := podAffinity(
|
||||
labels,
|
||||
"kubernetes.io/hostname",
|
||||
nodeAffinity,
|
||||
true,
|
||||
false,
|
||||
)
|
||||
|
||||
annotations := c.generatePodAnnotations(&c.Spec)
|
||||
|
||||
|
|
@ -2037,6 +2180,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
logicalBackupContainer,
|
||||
[]v1.Container{},
|
||||
[]v1.Container{},
|
||||
util.False(),
|
||||
&[]v1.Toleration{},
|
||||
nil,
|
||||
nil,
|
||||
|
|
@ -2050,6 +2194,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
util.False(),
|
||||
false,
|
||||
"",
|
||||
false,
|
||||
c.OpConfig.AdditionalSecretMount,
|
||||
c.OpConfig.AdditionalSecretMountPath,
|
||||
[]acidv1.AdditionalVolume{}); err != nil {
|
||||
|
|
@ -2057,7 +2202,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
}
|
||||
|
||||
// overwrite specific params of logical backups pods
|
||||
podTemplate.Spec.Affinity = &podAffinity
|
||||
podTemplate.Spec.Affinity = podAffinity
|
||||
podTemplate.Spec.RestartPolicy = "Never" // affects containers within a pod
|
||||
|
||||
// configure a batch job
|
||||
|
|
@ -2068,7 +2213,7 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
|
||||
// configure a cron job
|
||||
|
||||
jobTemplateSpec := batchv1beta1.JobTemplateSpec{
|
||||
jobTemplateSpec := batchv1.JobTemplateSpec{
|
||||
Spec: jobSpec,
|
||||
}
|
||||
|
||||
|
|
@ -2077,17 +2222,17 @@ func (c *Cluster) generateLogicalBackupJob() (*batchv1beta1.CronJob, error) {
|
|||
schedule = c.OpConfig.LogicalBackupSchedule
|
||||
}
|
||||
|
||||
cronJob := &batchv1beta1.CronJob{
|
||||
cronJob := &batchv1.CronJob{
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: c.getLogicalBackupJobName(),
|
||||
Namespace: c.Namespace,
|
||||
Labels: c.labelsSet(true),
|
||||
Annotations: c.annotationsSet(nil),
|
||||
},
|
||||
Spec: batchv1beta1.CronJobSpec{
|
||||
Spec: batchv1.CronJobSpec{
|
||||
Schedule: schedule,
|
||||
JobTemplate: jobTemplateSpec,
|
||||
ConcurrencyPolicy: batchv1beta1.ForbidConcurrent,
|
||||
ConcurrencyPolicy: batchv1.ForbidConcurrent,
|
||||
},
|
||||
}
|
||||
|
||||
|
|
@ -2147,6 +2292,18 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
|
|||
Name: "LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS",
|
||||
Value: c.OpConfig.LogicalBackup.LogicalBackupGoogleApplicationCredentials,
|
||||
},
|
||||
{
|
||||
Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_NAME",
|
||||
Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountName,
|
||||
},
|
||||
{
|
||||
Name: "LOGICAL_BACKUP_AZURE_STORAGE_CONTAINER",
|
||||
Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageContainer,
|
||||
},
|
||||
{
|
||||
Name: "LOGICAL_BACKUP_AZURE_STORAGE_ACCOUNT_KEY",
|
||||
Value: c.OpConfig.LogicalBackup.LogicalBackupAzureStorageAccountKey,
|
||||
},
|
||||
// Postgres env vars
|
||||
{
|
||||
Name: "PG_VERSION",
|
||||
|
|
@ -2196,7 +2353,7 @@ func (c *Cluster) generateLogicalBackupPodEnvVars() []v1.EnvVar {
|
|||
|
||||
// getLogicalBackupJobName returns the name; the job itself may not exists
|
||||
func (c *Cluster) getLogicalBackupJobName() (jobName string) {
|
||||
return trimCronjobName(c.OpConfig.LogicalBackupJobPrefix + c.clusterName().Name)
|
||||
return trimCronjobName(fmt.Sprintf("%s%s", c.OpConfig.LogicalBackupJobPrefix, c.clusterName().Name))
|
||||
}
|
||||
|
||||
// Return an array of ownerReferences to make an arbitraty object dependent on
|
||||
|
|
|
|||
File diff suppressed because it is too large
Load Diff
|
|
@ -11,13 +11,13 @@ import (
|
|||
|
||||
// VersionMap Map of version numbers
|
||||
var VersionMap = map[string]int{
|
||||
"9.5": 90500,
|
||||
"9.6": 90600,
|
||||
"10": 100000,
|
||||
"11": 110000,
|
||||
"12": 120000,
|
||||
"13": 130000,
|
||||
"14": 140000,
|
||||
"15": 150000,
|
||||
|
||||
}
|
||||
|
||||
// IsBiggerPostgresVersion Compare two Postgres version numbers
|
||||
|
|
@ -36,7 +36,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int {
|
|||
func (c *Cluster) GetDesiredMajorVersion() string {
|
||||
|
||||
if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
|
||||
// e.g. current is 9.6, minimal is 11 allowing 11 to 14 clusters, everything below is upgraded
|
||||
// e.g. current is 10, minimal is 11 allowing 11 to 15 clusters, everything below is upgraded
|
||||
if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
|
||||
c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
|
||||
return c.Config.OpConfig.TargetMajorVersion
|
||||
|
|
@ -105,7 +105,7 @@ func (c *Cluster) majorVersionUpgrade() error {
|
|||
podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}
|
||||
c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
|
||||
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "Starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
|
||||
upgradeCommand := fmt.Sprintf("/usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)
|
||||
upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)
|
||||
|
||||
c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)")
|
||||
resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u")
|
||||
|
|
|
|||
|
|
@ -3,7 +3,6 @@ package cluster
|
|||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/rand"
|
||||
"sort"
|
||||
"strconv"
|
||||
"time"
|
||||
|
|
@ -212,52 +211,19 @@ func (c *Cluster) movePodFromEndOfLifeNode(pod *v1.Pod) (*v1.Pod, error) {
|
|||
return newPod, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) masterCandidate(oldNodeName string) (*v1.Pod, error) {
|
||||
|
||||
// Wait until at least one replica pod will come up
|
||||
if err := c.waitForAnyReplicaLabelReady(); err != nil {
|
||||
c.logger.Warningf("could not find at least one ready replica: %v", err)
|
||||
}
|
||||
|
||||
replicas, err := c.getRolePods(Replica)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get replica pods: %v", err)
|
||||
}
|
||||
|
||||
if len(replicas) == 0 {
|
||||
c.logger.Warningf("no available master candidates, migration will cause longer downtime of Postgres cluster")
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
for i, pod := range replicas {
|
||||
// look for replicas running on live nodes. Ignore errors when querying the nodes.
|
||||
if pod.Spec.NodeName != oldNodeName {
|
||||
eol, err := c.podIsEndOfLife(&pod)
|
||||
if err == nil && !eol {
|
||||
return &replicas[i], nil
|
||||
}
|
||||
}
|
||||
}
|
||||
c.logger.Warningf("no available master candidates on live nodes")
|
||||
return &replicas[rand.Intn(len(replicas))], nil
|
||||
}
|
||||
|
||||
// MigrateMasterPod migrates master pod via failover to a replica
|
||||
func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
||||
var (
|
||||
masterCandidatePod *v1.Pod
|
||||
err error
|
||||
eol bool
|
||||
)
|
||||
|
||||
oldMaster, err := c.KubeClient.Pods(podName.Namespace).Get(context.TODO(), podName.Name, metav1.GetOptions{})
|
||||
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get pod: %v", err)
|
||||
return fmt.Errorf("could not get master pod: %v", err)
|
||||
}
|
||||
|
||||
c.logger.Infof("starting process to migrate master pod %q", podName)
|
||||
|
||||
if eol, err = c.podIsEndOfLife(oldMaster); err != nil {
|
||||
return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
|
||||
}
|
||||
|
|
@ -281,11 +247,17 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
|||
}
|
||||
c.Statefulset = sset
|
||||
}
|
||||
// We may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case.
|
||||
// we may not have a cached statefulset if the initial cluster sync has aborted, revert to the spec in that case
|
||||
masterCandidateName := podName
|
||||
masterCandidatePod := oldMaster
|
||||
if *c.Statefulset.Spec.Replicas > 1 {
|
||||
if masterCandidatePod, err = c.masterCandidate(oldMaster.Spec.NodeName); err != nil {
|
||||
if masterCandidateName, err = c.getSwitchoverCandidate(oldMaster); err != nil {
|
||||
return fmt.Errorf("could not find suitable replica pod as candidate for failover: %v", err)
|
||||
}
|
||||
masterCandidatePod, err = c.KubeClient.Pods(masterCandidateName.Namespace).Get(context.TODO(), masterCandidateName.Name, metav1.GetOptions{})
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get master candidate pod: %v", err)
|
||||
}
|
||||
} else {
|
||||
c.logger.Warningf("migrating single pod cluster %q, this will cause downtime of the Postgres cluster until pod is back", c.clusterName())
|
||||
}
|
||||
|
|
@ -302,11 +274,10 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
|
|||
return nil
|
||||
}
|
||||
|
||||
if masterCandidatePod, err = c.movePodFromEndOfLifeNode(masterCandidatePod); err != nil {
|
||||
if _, err = c.movePodFromEndOfLifeNode(masterCandidatePod); err != nil {
|
||||
return fmt.Errorf("could not move pod: %v", err)
|
||||
}
|
||||
|
||||
masterCandidateName := util.NameFromMeta(masterCandidatePod.ObjectMeta)
|
||||
err = retryutil.Retry(1*time.Minute, 5*time.Minute,
|
||||
func() (bool, error) {
|
||||
err := c.Switchover(oldMaster, masterCandidateName)
|
||||
|
|
|
|||
|
|
@ -7,9 +7,9 @@ import (
|
|||
"strings"
|
||||
|
||||
appsv1 "k8s.io/api/apps/v1"
|
||||
batchv1beta1 "k8s.io/api/batch/v1beta1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
"k8s.io/apimachinery/pkg/types"
|
||||
|
||||
|
|
@ -404,7 +404,7 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
|
|||
return result
|
||||
}
|
||||
|
||||
func (c *Cluster) createPodDisruptionBudget() (*policybeta1.PodDisruptionBudget, error) {
|
||||
func (c *Cluster) createPodDisruptionBudget() (*policyv1.PodDisruptionBudget, error) {
|
||||
podDisruptionBudgetSpec := c.generatePodDisruptionBudget()
|
||||
podDisruptionBudget, err := c.KubeClient.
|
||||
PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace).
|
||||
|
|
@ -418,7 +418,7 @@ func (c *Cluster) createPodDisruptionBudget() (*policybeta1.PodDisruptionBudget,
|
|||
return podDisruptionBudget, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) updatePodDisruptionBudget(pdb *policybeta1.PodDisruptionBudget) error {
|
||||
func (c *Cluster) updatePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error {
|
||||
if c.PodDisruptionBudget == nil {
|
||||
return fmt.Errorf("there is no pod disruption budget in the cluster")
|
||||
}
|
||||
|
|
@ -518,7 +518,7 @@ func (c *Cluster) deleteSecret(uid types.UID, secret v1.Secret) error {
|
|||
return fmt.Errorf("could not delete secret %q: %v", secretName, err)
|
||||
}
|
||||
c.logger.Infof("secret %q has been deleted", secretName)
|
||||
c.Secrets[uid] = nil
|
||||
delete(c.Secrets, uid)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
|
@ -546,7 +546,7 @@ func (c *Cluster) createLogicalBackupJob() (err error) {
|
|||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) patchLogicalBackupJob(newJob *batchv1beta1.CronJob) error {
|
||||
func (c *Cluster) patchLogicalBackupJob(newJob *batchv1.CronJob) error {
|
||||
c.setProcessName("patching logical backup job")
|
||||
|
||||
patchData, err := specPatch(newJob.Spec)
|
||||
|
|
@ -602,6 +602,6 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet {
|
|||
}
|
||||
|
||||
// GetPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget
|
||||
func (c *Cluster) GetPodDisruptionBudget() *policybeta1.PodDisruptionBudget {
|
||||
func (c *Cluster) GetPodDisruptionBudget() *policyv1.PodDisruptionBudget {
|
||||
return c.PodDisruptionBudget
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,15 +15,16 @@ import (
|
|||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
func (c *Cluster) createStreams(appId string) error {
|
||||
func (c *Cluster) createStreams(appId string) (*zalandov1.FabricEventStream, error) {
|
||||
c.setProcessName("creating streams")
|
||||
|
||||
fes := c.generateFabricEventStream(appId)
|
||||
if _, err := c.KubeClient.FabricEventStreams(c.Namespace).Create(context.TODO(), fes, metav1.CreateOptions{}); err != nil {
|
||||
return err
|
||||
streamCRD, err := c.KubeClient.FabricEventStreams(c.Namespace).Create(context.TODO(), fes, metav1.CreateOptions{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return nil
|
||||
return streamCRD, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) error {
|
||||
|
|
@ -46,11 +47,17 @@ func (c *Cluster) deleteStreams() error {
|
|||
}
|
||||
|
||||
errors := make([]string, 0)
|
||||
for _, appId := range c.streamApplications {
|
||||
fesName := fmt.Sprintf("%s-%s", c.Name, appId)
|
||||
err = c.KubeClient.FabricEventStreams(c.Namespace).Delete(context.TODO(), fesName, metav1.DeleteOptions{})
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: c.labelsSet(true).String(),
|
||||
}
|
||||
streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Sprintf("could not delete event stream %q: %v", fesName, err))
|
||||
return fmt.Errorf("could not list of FabricEventStreams: %v", err)
|
||||
}
|
||||
for _, stream := range streams.Items {
|
||||
err = c.KubeClient.FabricEventStreams(stream.Namespace).Delete(context.TODO(), stream.Name, metav1.DeleteOptions{})
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Sprintf("could not delete event stream %q: %v", stream.Name, err))
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -72,38 +79,6 @@ func gatherApplicationIds(streams []acidv1.Stream) []string {
|
|||
return appIds
|
||||
}
|
||||
|
||||
func (c *Cluster) syncPostgresConfig(requiredPatroniConfig acidv1.Patroni) error {
|
||||
errorMsg := "no pods found to update config"
|
||||
|
||||
// if streams are defined wal_level must be switched to logical
|
||||
requiredPgParameters := map[string]string{"wal_level": "logical"}
|
||||
|
||||
// apply config changes in pods
|
||||
pods, err := c.listPods()
|
||||
if err != nil {
|
||||
errorMsg = fmt.Sprintf("could not list pods of the statefulset: %v", err)
|
||||
}
|
||||
for i, pod := range pods {
|
||||
podName := util.NameFromMeta(pods[i].ObjectMeta)
|
||||
effectivePatroniConfig, effectivePgParameters, err := c.patroni.GetConfig(&pod)
|
||||
if err != nil {
|
||||
errorMsg = fmt.Sprintf("could not get Postgres config from pod %s: %v", podName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
_, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, effectivePatroniConfig, requiredPatroniConfig, effectivePgParameters, requiredPgParameters)
|
||||
if err != nil {
|
||||
errorMsg = fmt.Sprintf("could not set PostgreSQL configuration options for pod %s: %v", podName, err)
|
||||
continue
|
||||
}
|
||||
|
||||
// Patroni's config endpoint is just a "proxy" to DCS. It is enough to patch it only once and it doesn't matter which pod is used
|
||||
return nil
|
||||
}
|
||||
|
||||
return fmt.Errorf(errorMsg)
|
||||
}
|
||||
|
||||
func (c *Cluster) syncPublication(publication, dbName string, tables map[string]acidv1.StreamTable) error {
|
||||
createPublications := make(map[string]string)
|
||||
alterPublications := make(map[string]string)
|
||||
|
|
@ -184,8 +159,10 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
|
|||
Kind: constants.EventStreamCRDKind,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fmt.Sprintf("%s-%s", c.Name, appId),
|
||||
// max length for cluster name is 58 so we can only add 5 more characters / numbers
|
||||
Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))),
|
||||
Namespace: c.Namespace,
|
||||
Labels: c.labelsSet(true),
|
||||
Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
|
||||
// make cluster StatefulSet the owner (like with connection pooler objects)
|
||||
OwnerReferences: c.ownerReferences(),
|
||||
|
|
@ -264,7 +241,6 @@ func (c *Cluster) getStreamConnection(database, user, appId string) zalandov1.Co
|
|||
}
|
||||
|
||||
func (c *Cluster) syncStreams() error {
|
||||
|
||||
c.setProcessName("syncing streams")
|
||||
|
||||
_, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{})
|
||||
|
|
@ -273,15 +249,11 @@ func (c *Cluster) syncStreams() error {
|
|||
return nil
|
||||
}
|
||||
|
||||
// fetch different application IDs from streams section
|
||||
// there will be a separate event stream resource for each ID
|
||||
appIds := gatherApplicationIds(c.Spec.Streams)
|
||||
c.streamApplications = appIds
|
||||
|
||||
slots := make(map[string]map[string]string)
|
||||
slotsToSync := make(map[string]map[string]string)
|
||||
publications := make(map[string]map[string]acidv1.StreamTable)
|
||||
|
||||
requiredPatroniConfig := c.Spec.Patroni
|
||||
|
||||
if len(requiredPatroniConfig.Slots) > 0 {
|
||||
slots = requiredPatroniConfig.Slots
|
||||
}
|
||||
|
|
@ -308,21 +280,7 @@ func (c *Cluster) syncStreams() error {
|
|||
}
|
||||
}
|
||||
|
||||
// no slots = no streams defined
|
||||
if len(slots) > 0 {
|
||||
requiredPatroniConfig.Slots = slots
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
||||
// add extra logical slots to Patroni config
|
||||
c.logger.Debug("syncing Postgres config for logical decoding")
|
||||
err = c.syncPostgresConfig(requiredPatroniConfig)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to snyc Postgres config for event streaming: %v", err)
|
||||
}
|
||||
|
||||
// next, create publications to each created slot
|
||||
// create publications to each created slot
|
||||
c.logger.Debug("syncing database publications")
|
||||
for publication, tables := range publications {
|
||||
// but first check for existing publications
|
||||
|
|
@ -330,9 +288,31 @@ func (c *Cluster) syncStreams() error {
|
|||
err = c.syncPublication(publication, dbName, tables)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not sync publication %q in database %q: %v", publication, dbName, err)
|
||||
continue
|
||||
}
|
||||
slotsToSync[publication] = slots[publication]
|
||||
}
|
||||
|
||||
// no slots to sync = no streams defined or publications created
|
||||
if len(slotsToSync) > 0 {
|
||||
requiredPatroniConfig.Slots = slotsToSync
|
||||
} else {
|
||||
return nil
|
||||
}
|
||||
|
||||
c.logger.Debug("syncing logical replication slots")
|
||||
pods, err := c.listPods()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not get list of pods to sync logical replication slots via Patroni API: %v", err)
|
||||
}
|
||||
|
||||
// sync logical replication slots in Patroni config
|
||||
configPatched, _, _, err := c.syncPatroniConfig(pods, requiredPatroniConfig, nil)
|
||||
if err != nil {
|
||||
c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err)
|
||||
}
|
||||
|
||||
// finally sync stream CRDs
|
||||
err = c.createOrUpdateStreams()
|
||||
if err != nil {
|
||||
return err
|
||||
|
|
@ -342,31 +322,48 @@ func (c *Cluster) syncStreams() error {
|
|||
}
|
||||
|
||||
func (c *Cluster) createOrUpdateStreams() error {
|
||||
for _, appId := range c.streamApplications {
|
||||
fesName := fmt.Sprintf("%s-%s", c.Name, appId)
|
||||
effectiveStreams, err := c.KubeClient.FabricEventStreams(c.Namespace).Get(context.TODO(), fesName, metav1.GetOptions{})
|
||||
|
||||
// fetch different application IDs from streams section
|
||||
// there will be a separate event stream resource for each ID
|
||||
appIds := gatherApplicationIds(c.Spec.Streams)
|
||||
|
||||
// list all existing stream CRDs
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: c.labelsSet(true).String(),
|
||||
}
|
||||
streams, err := c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
|
||||
if err != nil {
|
||||
if !k8sutil.ResourceNotFound(err) {
|
||||
return fmt.Errorf("failed reading event stream %s: %v", fesName, err)
|
||||
return fmt.Errorf("could not list of FabricEventStreams: %v", err)
|
||||
}
|
||||
|
||||
c.logger.Infof("event streams do not exist, create it")
|
||||
err = c.createStreams(appId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating event stream %s: %v", fesName, err)
|
||||
}
|
||||
c.logger.Infof("event stream %q has been successfully created", fesName)
|
||||
} else {
|
||||
for _, appId := range appIds {
|
||||
streamExists := false
|
||||
|
||||
// update stream when it exists and EventStreams array differs
|
||||
for _, stream := range streams.Items {
|
||||
if appId == stream.Spec.ApplicationId {
|
||||
streamExists = true
|
||||
desiredStreams := c.generateFabricEventStream(appId)
|
||||
if match, reason := sameStreams(effectiveStreams.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match {
|
||||
if match, reason := sameStreams(stream.Spec.EventStreams, desiredStreams.Spec.EventStreams); !match {
|
||||
c.logger.Debugf("updating event streams: %s", reason)
|
||||
desiredStreams.ObjectMeta.ResourceVersion = effectiveStreams.ObjectMeta.ResourceVersion
|
||||
desiredStreams.ObjectMeta = stream.ObjectMeta
|
||||
err = c.updateStreams(desiredStreams)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed updating event stream %s: %v", fesName, err)
|
||||
return fmt.Errorf("failed updating event stream %s: %v", stream.Name, err)
|
||||
}
|
||||
c.logger.Infof("event stream %q has been successfully updated", fesName)
|
||||
c.logger.Infof("event stream %q has been successfully updated", stream.Name)
|
||||
}
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if !streamExists {
|
||||
c.logger.Infof("event streams with applicationId %s do not exist, create it", appId)
|
||||
streamCRD, err := c.createStreams(appId)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed creating event streams with applicationId %s: %v", appId, err)
|
||||
}
|
||||
c.logger.Infof("event streams %q have been successfully created", streamCRD.Name)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -1,9 +1,7 @@
|
|||
package cluster
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"context"
|
||||
|
|
@ -40,8 +38,7 @@ var (
|
|||
namespace string = "default"
|
||||
appId string = "test-app"
|
||||
dbName string = "foo"
|
||||
fesUser string = constants.EventStreamSourceSlotPrefix + constants.UserRoleNameSuffix
|
||||
fesName string = fmt.Sprintf("%s-%s", clusterName, appId)
|
||||
fesUser string = fmt.Sprintf("%s%s", constants.EventStreamSourceSlotPrefix, constants.UserRoleNameSuffix)
|
||||
slotName string = fmt.Sprintf("%s_%s_%s", constants.EventStreamSourceSlotPrefix, dbName, strings.Replace(appId, "-", "_", -1))
|
||||
|
||||
pg = acidv1.Postgresql{
|
||||
|
|
@ -55,7 +52,7 @@ var (
|
|||
},
|
||||
Spec: acidv1.PostgresSpec{
|
||||
Databases: map[string]string{
|
||||
dbName: dbName + constants.UserRoleNameSuffix,
|
||||
dbName: fmt.Sprintf("%s%s", dbName, constants.UserRoleNameSuffix),
|
||||
},
|
||||
Streams: []acidv1.Stream{
|
||||
{
|
||||
|
|
@ -77,6 +74,7 @@ var (
|
|||
BatchSize: k8sutil.UInt32ToPointer(uint32(100)),
|
||||
},
|
||||
},
|
||||
TeamID: "acid",
|
||||
Volume: acidv1.Volume{
|
||||
Size: "1Gi",
|
||||
},
|
||||
|
|
@ -89,7 +87,7 @@ var (
|
|||
Kind: constants.EventStreamCRDKind,
|
||||
},
|
||||
ObjectMeta: metav1.ObjectMeta{
|
||||
Name: fesName,
|
||||
Name: fmt.Sprintf("%s-12345", clusterName),
|
||||
Namespace: namespace,
|
||||
OwnerReferences: []metav1.OwnerReference{
|
||||
metav1.OwnerReference{
|
||||
|
|
@ -167,6 +165,15 @@ var (
|
|||
}
|
||||
)
|
||||
|
||||
func TestGatherApplicationIds(t *testing.T) {
|
||||
testAppIds := []string{appId}
|
||||
appIds := gatherApplicationIds(pg.Spec.Streams)
|
||||
|
||||
if !util.IsEqualIgnoreOrder(testAppIds, appIds) {
|
||||
t.Errorf("gathered applicationIds do not match, expected %#v, got %#v", testAppIds, appIds)
|
||||
}
|
||||
}
|
||||
|
||||
func TestGenerateFabricEventStream(t *testing.T) {
|
||||
client, _ := newFakeK8sStreamClient()
|
||||
|
||||
|
|
@ -196,9 +203,6 @@ func TestGenerateFabricEventStream(t *testing.T) {
|
|||
_, err := cluster.createStatefulSet()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// createOrUpdateStreams will loop over existing apps
|
||||
cluster.streamApplications = []string{appId}
|
||||
|
||||
// create the streams
|
||||
err = cluster.createOrUpdateStreams()
|
||||
assert.NoError(t, err)
|
||||
|
|
@ -209,22 +213,37 @@ func TestGenerateFabricEventStream(t *testing.T) {
|
|||
t.Errorf("malformed FabricEventStream, expected %#v, got %#v", fes, result)
|
||||
}
|
||||
|
||||
// compare stream resturned from API with expected stream
|
||||
streamCRD, err := cluster.KubeClient.FabricEventStreams(namespace).Get(context.TODO(), fesName, metav1.GetOptions{})
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: cluster.labelsSet(true).String(),
|
||||
}
|
||||
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
|
||||
assert.NoError(t, err)
|
||||
if match, _ := sameStreams(streamCRD.Spec.EventStreams, fes.Spec.EventStreams); !match {
|
||||
t.Errorf("malformed FabricEventStream returned from API, expected %#v, got %#v", fes, streamCRD)
|
||||
|
||||
// check if there is only one stream
|
||||
if len(streams.Items) > 1 {
|
||||
t.Errorf("too many stream CRDs found: got %d, but expected only one", len(streams.Items))
|
||||
}
|
||||
|
||||
// compare stream returned from API with expected stream
|
||||
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, fes.Spec.EventStreams); !match {
|
||||
t.Errorf("malformed FabricEventStream returned from API, expected %#v, got %#v", fes, streams.Items[0])
|
||||
}
|
||||
|
||||
// sync streams once again
|
||||
err = cluster.createOrUpdateStreams()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// compare stream resturned from API with generated stream
|
||||
streamCRD, err = cluster.KubeClient.FabricEventStreams(namespace).Get(context.TODO(), fesName, metav1.GetOptions{})
|
||||
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
|
||||
assert.NoError(t, err)
|
||||
if match, _ := sameStreams(streamCRD.Spec.EventStreams, result.Spec.EventStreams); !match {
|
||||
t.Errorf("returned FabricEventStream differs from generated one, expected %#v, got %#v", result, streamCRD)
|
||||
|
||||
// check if there is still only one stream
|
||||
if len(streams.Items) > 1 {
|
||||
t.Errorf("too many stream CRDs found after sync: got %d, but expected only one", len(streams.Items))
|
||||
}
|
||||
|
||||
// compare stream resturned from API with generated stream
|
||||
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
|
||||
t.Errorf("returned FabricEventStream differs from generated one, expected %#v, got %#v", result, streams.Items[0])
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -331,45 +350,45 @@ func TestUpdateFabricEventStream(t *testing.T) {
|
|||
context.TODO(), &pg, metav1.CreateOptions{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// createOrUpdateStreams will loop over existing apps
|
||||
cluster.streamApplications = []string{appId}
|
||||
// create statefulset to have ownerReference for streams
|
||||
_, err = cluster.createStatefulSet()
|
||||
assert.NoError(t, err)
|
||||
|
||||
// now create the stream
|
||||
err = cluster.createOrUpdateStreams()
|
||||
assert.NoError(t, err)
|
||||
|
||||
var pgSpec acidv1.PostgresSpec
|
||||
pgSpec.Streams = []acidv1.Stream{
|
||||
{
|
||||
ApplicationId: appId,
|
||||
Database: dbName,
|
||||
Tables: map[string]acidv1.StreamTable{
|
||||
"data.bar": acidv1.StreamTable{
|
||||
EventType: "stream-type-c",
|
||||
IdColumn: k8sutil.StringToPointer("b_id"),
|
||||
PayloadColumn: k8sutil.StringToPointer("b_payload"),
|
||||
},
|
||||
},
|
||||
BatchSize: k8sutil.UInt32ToPointer(uint32(250)),
|
||||
},
|
||||
// change specs of streams and patch CRD
|
||||
for i, stream := range pg.Spec.Streams {
|
||||
if stream.ApplicationId == appId {
|
||||
streamTable := stream.Tables["data.bar"]
|
||||
streamTable.EventType = "stream-type-c"
|
||||
stream.Tables["data.bar"] = streamTable
|
||||
stream.BatchSize = k8sutil.UInt32ToPointer(uint32(250))
|
||||
pg.Spec.Streams[i] = stream
|
||||
}
|
||||
patch, err := json.Marshal(struct {
|
||||
PostgresSpec interface{} `json:"spec"`
|
||||
}{&pgSpec})
|
||||
}
|
||||
|
||||
patchData, err := specPatch(pg.Spec)
|
||||
assert.NoError(t, err)
|
||||
|
||||
pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
|
||||
context.TODO(), cluster.Name, types.MergePatchType, patch, metav1.PatchOptions{}, "spec")
|
||||
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
|
||||
assert.NoError(t, err)
|
||||
|
||||
cluster.Postgresql.Spec = pgPatched.Spec
|
||||
err = cluster.createOrUpdateStreams()
|
||||
assert.NoError(t, err)
|
||||
|
||||
streamCRD, err := cluster.KubeClient.FabricEventStreams(namespace).Get(context.TODO(), fesName, metav1.GetOptions{})
|
||||
// compare stream returned from API with expected stream
|
||||
listOptions := metav1.ListOptions{
|
||||
LabelSelector: cluster.labelsSet(true).String(),
|
||||
}
|
||||
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
|
||||
assert.NoError(t, err)
|
||||
|
||||
result := cluster.generateFabricEventStream(appId)
|
||||
if !reflect.DeepEqual(result, streamCRD) {
|
||||
t.Errorf("Malformed FabricEventStream, expected %#v, got %#v", streamCRD, result)
|
||||
if match, _ := sameStreams(streams.Items[0].Spec.EventStreams, result.Spec.EventStreams); !match {
|
||||
t.Errorf("Malformed FabricEventStream, expected %#v, got %#v", streams.Items[0], result)
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -15,13 +15,13 @@ import (
|
|||
"github.com/zalando/postgres-operator/pkg/util"
|
||||
"github.com/zalando/postgres-operator/pkg/util/constants"
|
||||
"github.com/zalando/postgres-operator/pkg/util/k8sutil"
|
||||
batchv1beta1 "k8s.io/api/batch/v1beta1"
|
||||
batchv1 "k8s.io/api/batch/v1"
|
||||
v1 "k8s.io/api/core/v1"
|
||||
policybeta1 "k8s.io/api/policy/v1beta1"
|
||||
policyv1 "k8s.io/api/policy/v1"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
)
|
||||
|
||||
var requireMasterRestartWhenDecreased = []string{
|
||||
var requirePrimaryRestartWhenDecreased = []string{
|
||||
"max_connections",
|
||||
"max_prepared_transactions",
|
||||
"max_locks_per_transaction",
|
||||
|
|
@ -104,18 +104,15 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
|
|||
if !(c.databaseAccessDisabled() || c.getNumberOfInstances(&newSpec.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
|
||||
c.logger.Debug("syncing roles")
|
||||
if err = c.syncRoles(); err != nil {
|
||||
err = fmt.Errorf("could not sync roles: %v", err)
|
||||
return err
|
||||
c.logger.Errorf("could not sync roles: %v", err)
|
||||
}
|
||||
c.logger.Debug("syncing databases")
|
||||
if err = c.syncDatabases(); err != nil {
|
||||
err = fmt.Errorf("could not sync databases: %v", err)
|
||||
return err
|
||||
c.logger.Errorf("could not sync databases: %v", err)
|
||||
}
|
||||
c.logger.Debug("syncing prepared databases with schemas")
|
||||
if err = c.syncPreparedDatabases(); err != nil {
|
||||
err = fmt.Errorf("could not sync prepared database: %v", err)
|
||||
return err
|
||||
c.logger.Errorf("could not sync prepared database: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
|
|
@ -236,7 +233,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
|
|||
|
||||
func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
|
||||
var (
|
||||
pdb *policybeta1.PodDisruptionBudget
|
||||
pdb *policyv1.PodDisruptionBudget
|
||||
err error
|
||||
)
|
||||
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
|
||||
|
|
@ -279,7 +276,8 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
|
|||
func (c *Cluster) syncStatefulSet() error {
|
||||
var (
|
||||
restartWait uint32
|
||||
restartMasterFirst bool
|
||||
configPatched bool
|
||||
restartPrimaryFirst bool
|
||||
)
|
||||
podsToRecreate := make([]v1.Pod, 0)
|
||||
isSafeToRecreatePods := true
|
||||
|
|
@ -397,68 +395,34 @@ func (c *Cluster) syncStatefulSet() error {
|
|||
}
|
||||
}
|
||||
|
||||
// Apply special PostgreSQL parameters that can only be set via the Patroni API.
|
||||
// apply PostgreSQL parameters that can only be set via the Patroni API.
|
||||
// it is important to do it after the statefulset pods are there, but before the rolling update
|
||||
// since those parameters require PostgreSQL restart.
|
||||
pods, err = c.listPods()
|
||||
if err != nil {
|
||||
c.logger.Warnf("could not get list of pods to apply special PostgreSQL parameters only to be set via Patroni API: %v", err)
|
||||
c.logger.Warnf("could not get list of pods to apply PostgreSQL parameters only to be set via Patroni API: %v", err)
|
||||
}
|
||||
|
||||
// get Postgres config, compare with manifest and update via Patroni PATCH endpoint if it differs
|
||||
// Patroni's config endpoint is just a "proxy" to DCS. It is enough to patch it only once and it doesn't matter which pod is used
|
||||
for i, pod := range pods {
|
||||
patroniConfig, pgParameters, err := c.getPatroniConfig(&pod)
|
||||
if err != nil {
|
||||
c.logger.Warningf("%v", err)
|
||||
isSafeToRecreatePods = false
|
||||
continue
|
||||
requiredPgParameters := make(map[string]string)
|
||||
for k, v := range c.Spec.Parameters {
|
||||
requiredPgParameters[k] = v
|
||||
}
|
||||
restartWait = patroniConfig.LoopWait
|
||||
|
||||
// empty config probably means cluster is not fully initialized yet, e.g. restoring from backup
|
||||
// do not attempt a restart
|
||||
if !reflect.DeepEqual(patroniConfig, acidv1.Patroni{}) || len(pgParameters) > 0 {
|
||||
// compare config returned from Patroni with what is specified in the manifest
|
||||
restartMasterFirst, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, patroniConfig, c.Spec.Patroni, pgParameters, c.Spec.Parameters)
|
||||
if err != nil {
|
||||
c.logger.Warningf("could not set PostgreSQL configuration options for pod %s: %v", pods[i].Name, err)
|
||||
continue
|
||||
// if streams are defined wal_level must be switched to logical
|
||||
if len(c.Spec.Streams) > 0 {
|
||||
requiredPgParameters["wal_level"] = "logical"
|
||||
}
|
||||
|
||||
// it could take up to LoopWait to apply the config
|
||||
time.Sleep(time.Duration(restartWait)*time.Second + time.Second*2)
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
// restart instances if required
|
||||
remainingPods := make([]*v1.Pod, 0)
|
||||
skipRole := Master
|
||||
if restartMasterFirst {
|
||||
skipRole = Replica
|
||||
}
|
||||
for i, pod := range pods {
|
||||
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
|
||||
if role == skipRole {
|
||||
remainingPods = append(remainingPods, &pods[i])
|
||||
continue
|
||||
}
|
||||
if err = c.restartInstance(&pod, restartWait); err != nil {
|
||||
c.logger.Errorf("%v", err)
|
||||
// sync Patroni config
|
||||
if configPatched, restartPrimaryFirst, restartWait, err = c.syncPatroniConfig(pods, c.Spec.Patroni, requiredPgParameters); err != nil {
|
||||
c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err)
|
||||
isSafeToRecreatePods = false
|
||||
}
|
||||
}
|
||||
|
||||
// in most cases only the master should be left to restart
|
||||
if len(remainingPods) > 0 {
|
||||
for _, remainingPod := range remainingPods {
|
||||
if err = c.restartInstance(remainingPod, restartWait); err != nil {
|
||||
c.logger.Errorf("%v", err)
|
||||
// restart Postgres where it is still pending
|
||||
if err = c.restartInstances(pods, restartWait, restartPrimaryFirst); err != nil {
|
||||
c.logger.Errorf("errors while restarting Postgres in pods via Patroni API: %v", err)
|
||||
isSafeToRecreatePods = false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// if we get here we also need to re-create the pods (either leftovers from the old
|
||||
// statefulset or those that got their configuration from the outdated statefulset)
|
||||
|
|
@ -471,9 +435,95 @@ func (c *Cluster) syncStatefulSet() error {
|
|||
}
|
||||
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated")
|
||||
} else {
|
||||
c.logger.Warningf("postpone pod recreation until next sync")
|
||||
c.logger.Warningf("postpone pod recreation until next sync because of errors during config sync")
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Cluster) syncPatroniConfig(pods []v1.Pod, requiredPatroniConfig acidv1.Patroni, requiredPgParameters map[string]string) (bool, bool, uint32, error) {
|
||||
var (
|
||||
effectivePatroniConfig acidv1.Patroni
|
||||
effectivePgParameters map[string]string
|
||||
loopWait uint32
|
||||
configPatched bool
|
||||
restartPrimaryFirst bool
|
||||
err error
|
||||
)
|
||||
|
||||
errors := make([]string, 0)
|
||||
|
||||
// get Postgres config, compare with manifest and update via Patroni PATCH endpoint if it differs
|
||||
for i, pod := range pods {
|
||||
podName := util.NameFromMeta(pods[i].ObjectMeta)
|
||||
effectivePatroniConfig, effectivePgParameters, err = c.patroni.GetConfig(&pod)
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Sprintf("could not get Postgres config from pod %s: %v", podName, err))
|
||||
continue
|
||||
}
|
||||
loopWait = effectivePatroniConfig.LoopWait
|
||||
|
||||
// empty config probably means cluster is not fully initialized yet, e.g. restoring from backup
|
||||
if reflect.DeepEqual(effectivePatroniConfig, acidv1.Patroni{}) || len(effectivePgParameters) == 0 {
|
||||
errors = append(errors, fmt.Sprintf("empty Patroni config on pod %s - skipping config patch", podName))
|
||||
} else {
|
||||
configPatched, restartPrimaryFirst, err = c.checkAndSetGlobalPostgreSQLConfiguration(&pod, effectivePatroniConfig, requiredPatroniConfig, effectivePgParameters, requiredPgParameters)
|
||||
if err != nil {
|
||||
errors = append(errors, fmt.Sprintf("could not set PostgreSQL configuration options for pod %s: %v", podName, err))
|
||||
continue
|
||||
}
|
||||
|
||||
// it could take up to LoopWait to apply the config
|
||||
if configPatched {
|
||||
time.Sleep(time.Duration(loopWait)*time.Second + time.Second*2)
|
||||
// Patroni's config endpoint is just a "proxy" to DCS.
|
||||
// It is enough to patch it only once and it doesn't matter which pod is used
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
err = fmt.Errorf("%v", strings.Join(errors, `', '`))
|
||||
}
|
||||
|
||||
return configPatched, restartPrimaryFirst, loopWait, err
|
||||
}
|
||||
|
||||
func (c *Cluster) restartInstances(pods []v1.Pod, restartWait uint32, restartPrimaryFirst bool) (err error) {
|
||||
errors := make([]string, 0)
|
||||
remainingPods := make([]*v1.Pod, 0)
|
||||
|
||||
skipRole := Master
|
||||
if restartPrimaryFirst {
|
||||
skipRole = Replica
|
||||
}
|
||||
|
||||
for i, pod := range pods {
|
||||
role := PostgresRole(pod.Labels[c.OpConfig.PodRoleLabel])
|
||||
if role == skipRole {
|
||||
remainingPods = append(remainingPods, &pods[i])
|
||||
continue
|
||||
}
|
||||
if err = c.restartInstance(&pod, restartWait); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("%v", err))
|
||||
}
|
||||
}
|
||||
|
||||
// in most cases only the master should be left to restart
|
||||
if len(remainingPods) > 0 {
|
||||
for _, remainingPod := range remainingPods {
|
||||
if err = c.restartInstance(remainingPod, restartWait); err != nil {
|
||||
errors = append(errors, fmt.Sprintf("%v", err))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
return fmt.Errorf("%v", strings.Join(errors, `', '`))
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
|
@ -533,10 +583,11 @@ func (c *Cluster) AnnotationsToPropagate(annotations map[string]string) map[stri
|
|||
|
||||
// checkAndSetGlobalPostgreSQLConfiguration checks whether cluster-wide API parameters
|
||||
// (like max_connections) have changed and if necessary sets it via the Patroni API
|
||||
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectivePatroniConfig, desiredPatroniConfig acidv1.Patroni, effectivePgParameters, desiredPgParameters map[string]string) (bool, error) {
|
||||
func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectivePatroniConfig, desiredPatroniConfig acidv1.Patroni, effectivePgParameters, desiredPgParameters map[string]string) (bool, bool, error) {
|
||||
configToSet := make(map[string]interface{})
|
||||
parametersToSet := make(map[string]string)
|
||||
restartMaster := make([]bool, 0)
|
||||
restartPrimary := make([]bool, 0)
|
||||
configPatched := false
|
||||
requiresMasterRestart := false
|
||||
|
||||
// compare effective and desired Patroni config options
|
||||
|
|
@ -562,8 +613,33 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
|
|||
configToSet["ttl"] = desiredPatroniConfig.TTL
|
||||
}
|
||||
|
||||
var desiredFailsafe *bool
|
||||
if desiredPatroniConfig.FailsafeMode != nil {
|
||||
desiredFailsafe = desiredPatroniConfig.FailsafeMode
|
||||
} else if c.OpConfig.EnablePatroniFailsafeMode != nil {
|
||||
desiredFailsafe = c.OpConfig.EnablePatroniFailsafeMode
|
||||
}
|
||||
|
||||
effectiveFailsafe := effectivePatroniConfig.FailsafeMode
|
||||
|
||||
if desiredFailsafe != nil {
|
||||
if effectiveFailsafe == nil || *desiredFailsafe != *effectiveFailsafe {
|
||||
configToSet["failsafe_mode"] = *desiredFailsafe
|
||||
}
|
||||
}
|
||||
|
||||
slotsToSet := make(map[string]interface{})
|
||||
// check if there is any slot deletion
|
||||
for slotName, effectiveSlot := range c.replicationSlots {
|
||||
if desiredSlot, exists := desiredPatroniConfig.Slots[slotName]; exists {
|
||||
if reflect.DeepEqual(effectiveSlot, desiredSlot) {
|
||||
continue
|
||||
}
|
||||
}
|
||||
slotsToSet[slotName] = nil
|
||||
delete(c.replicationSlots, slotName)
|
||||
}
|
||||
// check if specified slots exist in config and if they differ
|
||||
slotsToSet := make(map[string]map[string]string)
|
||||
for slotName, desiredSlot := range desiredPatroniConfig.Slots {
|
||||
if effectiveSlot, exists := effectivePatroniConfig.Slots[slotName]; exists {
|
||||
if reflect.DeepEqual(desiredSlot, effectiveSlot) {
|
||||
|
|
@ -571,6 +647,12 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
|
|||
}
|
||||
}
|
||||
slotsToSet[slotName] = desiredSlot
|
||||
// only add slots specified in manifest to c.replicationSlots
|
||||
for manifestSlotName, _ := range c.Spec.Patroni.Slots {
|
||||
if manifestSlotName == slotName {
|
||||
c.replicationSlots[slotName] = desiredSlot
|
||||
}
|
||||
}
|
||||
}
|
||||
if len(slotsToSet) > 0 {
|
||||
configToSet["slots"] = slotsToSet
|
||||
|
|
@ -581,22 +663,23 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
|
|||
effectiveValue := effectivePgParameters[desiredOption]
|
||||
if isBootstrapOnlyParameter(desiredOption) && (effectiveValue != desiredValue) {
|
||||
parametersToSet[desiredOption] = desiredValue
|
||||
if util.SliceContains(requireMasterRestartWhenDecreased, desiredOption) {
|
||||
if util.SliceContains(requirePrimaryRestartWhenDecreased, desiredOption) {
|
||||
effectiveValueNum, errConv := strconv.Atoi(effectiveValue)
|
||||
desiredValueNum, errConv2 := strconv.Atoi(desiredValue)
|
||||
if errConv != nil || errConv2 != nil {
|
||||
continue
|
||||
}
|
||||
if effectiveValueNum > desiredValueNum {
|
||||
restartMaster = append(restartMaster, true)
|
||||
restartPrimary = append(restartPrimary, true)
|
||||
continue
|
||||
}
|
||||
}
|
||||
restartMaster = append(restartMaster, false)
|
||||
restartPrimary = append(restartPrimary, false)
|
||||
}
|
||||
}
|
||||
|
||||
if !util.SliceContains(restartMaster, false) && len(configToSet) == 0 {
|
||||
// check if there exist only config updates that require a restart of the primary
|
||||
if len(restartPrimary) > 0 && !util.SliceContains(restartPrimary, false) && len(configToSet) == 0 {
|
||||
requiresMasterRestart = true
|
||||
}
|
||||
|
||||
|
|
@ -605,7 +688,7 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
|
|||
}
|
||||
|
||||
if len(configToSet) == 0 {
|
||||
return false, nil
|
||||
return configPatched, requiresMasterRestart, nil
|
||||
}
|
||||
|
||||
configToSetJson, err := json.Marshal(configToSet)
|
||||
|
|
@ -619,18 +702,17 @@ func (c *Cluster) checkAndSetGlobalPostgreSQLConfiguration(pod *v1.Pod, effectiv
|
|||
c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
|
||||
podName, configToSetJson)
|
||||
if err = c.patroni.SetConfig(pod, configToSet); err != nil {
|
||||
return requiresMasterRestart, fmt.Errorf("could not patch postgres parameters within pod %s: %v", podName, err)
|
||||
return configPatched, requiresMasterRestart, fmt.Errorf("could not patch postgres parameters within pod %s: %v", podName, err)
|
||||
}
|
||||
configPatched = true
|
||||
|
||||
return requiresMasterRestart, nil
|
||||
return configPatched, requiresMasterRestart, nil
|
||||
}
|
||||
|
||||
func (c *Cluster) syncSecrets() error {
|
||||
|
||||
c.logger.Info("syncing secrets")
|
||||
c.setProcessName("syncing secrets")
|
||||
generatedSecrets := c.generateUserSecrets()
|
||||
rotationUsers := make(spec.PgUserMap)
|
||||
retentionUsers := make([]string, 0)
|
||||
currentTime := time.Now()
|
||||
|
||||
|
|
@ -642,7 +724,7 @@ func (c *Cluster) syncSecrets() error {
|
|||
continue
|
||||
}
|
||||
if k8sutil.ResourceAlreadyExists(err) {
|
||||
if err = c.updateSecret(secretUsername, generatedSecret, &rotationUsers, &retentionUsers, currentTime); err != nil {
|
||||
if err = c.updateSecret(secretUsername, generatedSecret, &retentionUsers, currentTime); err != nil {
|
||||
c.logger.Warningf("syncing secret %s failed: %v", util.NameFromMeta(secret.ObjectMeta), err)
|
||||
}
|
||||
} else {
|
||||
|
|
@ -650,21 +732,6 @@ func (c *Cluster) syncSecrets() error {
|
|||
}
|
||||
}
|
||||
|
||||
// add new user with date suffix and use it in the secret of the original user
|
||||
if len(rotationUsers) > 0 {
|
||||
err := c.initDbConn()
|
||||
if err != nil {
|
||||
return fmt.Errorf("could not init db connection: %v", err)
|
||||
}
|
||||
pgSyncRequests := c.userSyncStrategy.ProduceSyncRequests(spec.PgUserMap{}, rotationUsers)
|
||||
if err = c.userSyncStrategy.ExecuteSyncRequests(pgSyncRequests, c.pgDb); err != nil {
|
||||
return fmt.Errorf("error creating database roles for password rotation: %v", err)
|
||||
}
|
||||
if err := c.closeDbConn(); err != nil {
|
||||
c.logger.Errorf("could not close database connection after creating users for password rotation: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// remove rotation users that exceed the retention interval
|
||||
if len(retentionUsers) > 0 {
|
||||
err := c.initDbConn()
|
||||
|
|
@ -690,7 +757,6 @@ func (c *Cluster) getNextRotationDate(currentDate time.Time) (time.Time, string)
|
|||
func (c *Cluster) updateSecret(
|
||||
secretUsername string,
|
||||
generatedSecret *v1.Secret,
|
||||
rotationUsers *spec.PgUserMap,
|
||||
retentionUsers *[]string,
|
||||
currentTime time.Time) error {
|
||||
var (
|
||||
|
|
@ -698,8 +764,6 @@ func (c *Cluster) updateSecret(
|
|||
err error
|
||||
updateSecret bool
|
||||
updateSecretMsg string
|
||||
nextRotationDate time.Time
|
||||
nextRotationDateStr string
|
||||
)
|
||||
|
||||
// get the secret first
|
||||
|
|
@ -721,50 +785,42 @@ func (c *Cluster) updateSecret(
|
|||
userKey = secretUsername
|
||||
userMap = c.pgUsers
|
||||
}
|
||||
|
||||
// use system user when pooler is enabled and pooler user is specfied in manifest
|
||||
if _, exists := c.systemUsers[constants.ConnectionPoolerUserKeyName]; exists {
|
||||
if secretUsername == c.systemUsers[constants.ConnectionPoolerUserKeyName].Name {
|
||||
userKey = constants.ConnectionPoolerUserKeyName
|
||||
userMap = c.systemUsers
|
||||
}
|
||||
}
|
||||
// use system user when streams are defined and fes_user is specfied in manifest
|
||||
if _, exists := c.systemUsers[constants.EventStreamUserKeyName]; exists {
|
||||
if secretUsername == c.systemUsers[constants.EventStreamUserKeyName].Name {
|
||||
userKey = constants.EventStreamUserKeyName
|
||||
userMap = c.systemUsers
|
||||
}
|
||||
}
|
||||
|
||||
pwdUser := userMap[userKey]
|
||||
secretName := util.NameFromMeta(secret.ObjectMeta)
|
||||
|
||||
// if password rotation is enabled update password and username if rotation interval has been passed
|
||||
if (c.OpConfig.EnablePasswordRotation && !pwdUser.IsDbOwner &&
|
||||
pwdUser.Origin != spec.RoleOriginInfrastructure && pwdUser.Origin != spec.RoleOriginSystem) ||
|
||||
util.SliceContains(c.Spec.UsersWithSecretRotation, secretUsername) ||
|
||||
util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
|
||||
// rotation can be enabled globally or via the manifest (excluding the Postgres superuser)
|
||||
rotationEnabledInManifest := secretUsername != constants.SuperuserKeyName &&
|
||||
(util.SliceContains(c.Spec.UsersWithSecretRotation, secretUsername) ||
|
||||
util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername))
|
||||
|
||||
// initialize password rotation setting first rotation date
|
||||
nextRotationDateStr = string(secret.Data["nextRotation"])
|
||||
if nextRotationDate, err = time.ParseInLocation(time.RFC3339, nextRotationDateStr, currentTime.UTC().Location()); err != nil {
|
||||
nextRotationDate, nextRotationDateStr = c.getNextRotationDate(currentTime)
|
||||
secret.Data["nextRotation"] = []byte(nextRotationDateStr)
|
||||
// globally enabled rotation is only allowed for manifest and bootstrapped roles
|
||||
allowedRoleTypes := []spec.RoleOrigin{spec.RoleOriginManifest, spec.RoleOriginBootstrap}
|
||||
rotationAllowed := !pwdUser.IsDbOwner && util.SliceContains(allowedRoleTypes, pwdUser.Origin) && c.Spec.StandbyCluster == nil
|
||||
|
||||
if (c.OpConfig.EnablePasswordRotation && rotationAllowed) || rotationEnabledInManifest {
|
||||
updateSecretMsg, err = c.rotatePasswordInSecret(secret, secretUsername, pwdUser.Origin, currentTime, retentionUsers)
|
||||
if err != nil {
|
||||
c.logger.Warnf("password rotation failed for user %s: %v", secretUsername, err)
|
||||
}
|
||||
if updateSecretMsg != "" {
|
||||
updateSecret = true
|
||||
updateSecretMsg = fmt.Sprintf("rotation date not found in secret %q. Setting it to %s", secretName, nextRotationDateStr)
|
||||
}
|
||||
|
||||
// check if next rotation can happen sooner
|
||||
// if rotation interval has been decreased
|
||||
currentRotationDate, nextRotationDateStr := c.getNextRotationDate(currentTime)
|
||||
if nextRotationDate.After(currentRotationDate) {
|
||||
nextRotationDate = currentRotationDate
|
||||
}
|
||||
|
||||
// update password and next rotation date if configured interval has passed
|
||||
if currentTime.After(nextRotationDate) {
|
||||
// create rotation user if role is not listed for in-place password update
|
||||
if !util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
|
||||
rotationUser := pwdUser
|
||||
newRotationUsername := secretUsername + currentTime.Format("060102")
|
||||
rotationUser.Name = newRotationUsername
|
||||
rotationUser.MemberOf = []string{secretUsername}
|
||||
(*rotationUsers)[newRotationUsername] = rotationUser
|
||||
secret.Data["username"] = []byte(newRotationUsername)
|
||||
|
||||
// whenever there is a rotation, check if old rotation users can be deleted
|
||||
*retentionUsers = append(*retentionUsers, secretUsername)
|
||||
}
|
||||
secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
|
||||
secret.Data["nextRotation"] = []byte(nextRotationDateStr)
|
||||
|
||||
updateSecret = true
|
||||
updateSecretMsg = fmt.Sprintf("updating secret %q due to password rotation - next rotation date: %s", secretName, nextRotationDateStr)
|
||||
}
|
||||
} else {
|
||||
// username might not match if password rotation has been disabled again
|
||||
|
|
@ -784,15 +840,21 @@ func (c *Cluster) updateSecret(
|
|||
updateSecret = true
|
||||
updateSecretMsg = fmt.Sprintf("updating the secret %s from the infrastructure roles", secretName)
|
||||
} else {
|
||||
// for non-infrastructure role - update the role with the password from the secret
|
||||
// for non-infrastructure role - update the role with username and password from secret
|
||||
pwdUser.Name = string(secret.Data["username"])
|
||||
pwdUser.Password = string(secret.Data["password"])
|
||||
// update membership if we deal with a rotation user
|
||||
if secretUsername != pwdUser.Name {
|
||||
pwdUser.Rotated = true
|
||||
pwdUser.MemberOf = []string{secretUsername}
|
||||
}
|
||||
userMap[userKey] = pwdUser
|
||||
}
|
||||
|
||||
if updateSecret {
|
||||
c.logger.Debugln(updateSecretMsg)
|
||||
if _, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
|
||||
return fmt.Errorf("could not update secret %q: %v", secretName, err)
|
||||
return fmt.Errorf("could not update secret %s: %v", secretName, err)
|
||||
}
|
||||
c.Secrets[secret.UID] = secret
|
||||
}
|
||||
|
|
@@ -800,11 +862,96 @@ func (c *Cluster) updateSecret(
	return nil
}

func (c *Cluster) rotatePasswordInSecret(
	secret *v1.Secret,
	secretUsername string,
	roleOrigin spec.RoleOrigin,
	currentTime time.Time,
	retentionUsers *[]string) (string, error) {
	var (
		err                 error
		nextRotationDate    time.Time
		nextRotationDateStr string
		updateSecretMsg     string
	)

	secretName := util.NameFromMeta(secret.ObjectMeta)

	// initialize password rotation setting first rotation date
	nextRotationDateStr = string(secret.Data["nextRotation"])
	if nextRotationDate, err = time.ParseInLocation(time.RFC3339, nextRotationDateStr, currentTime.UTC().Location()); err != nil {
		nextRotationDate, nextRotationDateStr = c.getNextRotationDate(currentTime)
		secret.Data["nextRotation"] = []byte(nextRotationDateStr)
		updateSecretMsg = fmt.Sprintf("rotation date not found in secret %s. Setting it to %s", secretName, nextRotationDateStr)
	}

	// check if next rotation can happen sooner
	// if rotation interval has been decreased
	currentRotationDate, nextRotationDateStr := c.getNextRotationDate(currentTime)
	if nextRotationDate.After(currentRotationDate) {
		nextRotationDate = currentRotationDate
	}

	// update password and next rotation date if configured interval has passed
	if currentTime.After(nextRotationDate) {
		// create rotation user if role is not listed for in-place password update
		if !util.SliceContains(c.Spec.UsersWithInPlaceSecretRotation, secretUsername) {
			rotationUsername := fmt.Sprintf("%s%s", secretUsername, currentTime.Format(constants.RotationUserDateFormat))
			secret.Data["username"] = []byte(rotationUsername)
			c.logger.Infof("updating username in secret %s and creating rotation user %s in the database", secretName, rotationUsername)
			// whenever there is a rotation, check if old rotation users can be deleted
			*retentionUsers = append(*retentionUsers, secretUsername)
		} else {
			// when passwords of system users are rotated in place, pods have to be replaced
			if roleOrigin == spec.RoleOriginSystem {
				pods, err := c.listPods()
				if err != nil {
					return "", fmt.Errorf("could not list pods of the statefulset: %v", err)
				}
				for _, pod := range pods {
					if err = c.markRollingUpdateFlagForPod(&pod,
						fmt.Sprintf("replace pod due to password rotation of system user %s", secretUsername)); err != nil {
						c.logger.Warnf("marking pod for rolling update due to password rotation failed: %v", err)
					}
				}
			}

			// when password of connection pooler is rotated in place, pooler pods have to be replaced
			if roleOrigin == spec.RoleOriginConnectionPooler {
				listOptions := metav1.ListOptions{
					LabelSelector: c.poolerLabelsSet(true).String(),
				}
				poolerPods, err := c.listPoolerPods(listOptions)
				if err != nil {
					return "", fmt.Errorf("could not list pods of the pooler deployment: %v", err)
				}
				for _, poolerPod := range poolerPods {
					if err = c.markRollingUpdateFlagForPod(&poolerPod,
						fmt.Sprintf("replace pooler pod due to password rotation of pooler user %s", secretUsername)); err != nil {
						c.logger.Warnf("marking pooler pod for rolling update due to password rotation failed: %v", err)
					}
				}
			}

			// when password of stream user is rotated in place, it should trigger rolling update in FES deployment
			if roleOrigin == spec.RoleOriginStream {
				c.logger.Warnf("password in secret of stream user %s changed", constants.EventStreamSourceSlotPrefix+constants.UserRoleNameSuffix)
			}
		}
		secret.Data["password"] = []byte(util.RandomPassword(constants.PasswordLength))
		secret.Data["nextRotation"] = []byte(nextRotationDateStr)
		updateSecretMsg = fmt.Sprintf("updating secret %s due to password rotation - next rotation date: %s", secretName, nextRotationDateStr)
	}

	return updateSecretMsg, nil
}
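The rotation-user naming above is compact but easy to misread: the rotation user keeps the original role name and appends the rotation day as a YYMMDD suffix. A standalone sketch of that scheme, assuming constants.RotationUserDateFormat resolves to the "060102" layout the pre-refactoring code inlined:

package main

import (
	"fmt"
	"time"
)

// rotationName mirrors the scheme in rotatePasswordInSecret: the original
// role name plus the rotation day. "060102" is Go's reference-date layout
// producing a YYMMDD suffix.
func rotationName(role string, now time.Time) string {
	return fmt.Sprintf("%s%s", role, now.Format("060102"))
}

func main() {
	day := time.Date(2023, 1, 15, 0, 0, 0, 0, time.UTC)
	fmt.Println(rotationName("appowner", day)) // appowner230115
}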

func (c *Cluster) syncRoles() (err error) {
	c.setProcessName("syncing roles")

	var (
		dbUsers   spec.PgUserMap
		newUsers  spec.PgUserMap
		userNames []string
	)

@@ -825,11 +972,18 @@ func (c *Cluster) syncRoles() (err error) {

	// mapping between original role name and with deletion suffix
	deletedUsers := map[string]string{}
	newUsers = make(map[string]spec.PgUser)

	// create list of database roles to query
	for _, u := range c.pgUsers {
		pgRole := u.Name
		userNames = append(userNames, pgRole)

		// when a rotation happened add group role to query its rolconfig
		if u.Rotated {
			userNames = append(userNames, u.MemberOf[0])
		}

		// add team member role name with rename suffix in case we need to rename it back
		if u.Origin == spec.RoleOriginTeamsAPI && c.OpConfig.EnableTeamMemberDeprecation {
			deletedUsers[pgRole+c.OpConfig.RoleDeletionSuffix] = pgRole

@@ -845,15 +999,10 @@ func (c *Cluster) syncRoles() (err error) {
		}
	}

	// add pooler user to list of pgUsers, too
	// to check if the pooler user exists or has to be created
	if needMasterConnectionPooler(&c.Spec) || needReplicaConnectionPooler(&c.Spec) {
		connectionPoolerUser := c.systemUsers[constants.ConnectionPoolerUserKeyName]
		userNames = append(userNames, connectionPoolerUser.Name)

		if _, exists := c.pgUsers[connectionPoolerUser.Name]; !exists {
			c.pgUsers[connectionPoolerUser.Name] = connectionPoolerUser
		}
	// search also for system users
	for _, systemUser := range c.systemUsers {
		userNames = append(userNames, systemUser.Name)
		newUsers[systemUser.Name] = systemUser
	}

	dbUsers, err = c.readPgUsersFromDatabase(userNames)
@@ -861,17 +1010,37 @@ func (c *Cluster) syncRoles() (err error) {
		return fmt.Errorf("error getting users from the database: %v", err)
	}

	// update pgUsers where a deleted role was found
	// so that they are skipped in ProduceSyncRequests
DBUSERS:
	for _, dbUser := range dbUsers {
		if originalUser, exists := deletedUsers[dbUser.Name]; exists {
			recreatedUser := c.pgUsers[originalUser]
			recreatedUser.Deleted = true
			c.pgUsers[originalUser] = recreatedUser
		// copy rolconfig to rotation users
		for pgUserName, pgUser := range c.pgUsers {
			if pgUser.Rotated && pgUser.MemberOf[0] == dbUser.Name {
				pgUser.Parameters = dbUser.Parameters
				c.pgUsers[pgUserName] = pgUser
				// remove group role from dbUsers to not count as deleted role
				delete(dbUsers, dbUser.Name)
				continue DBUSERS
			}
		}

	pgSyncRequests := c.userSyncStrategy.ProduceSyncRequests(dbUsers, c.pgUsers)
		// update pgUsers where a deleted role was found
		// so that they are skipped in ProduceSyncRequests
		originalUsername, foundDeletedUser := deletedUsers[dbUser.Name]
		// check if original user does not exist in dbUsers
		_, originalUserAlreadyExists := dbUsers[originalUsername]
		if foundDeletedUser && !originalUserAlreadyExists {
			recreatedUser := c.pgUsers[originalUsername]
			recreatedUser.Deleted = true
			c.pgUsers[originalUsername] = recreatedUser
		}
	}

	// last but not least copy pgUsers to newUsers to send to ProduceSyncRequests
	for _, pgUser := range c.pgUsers {
		newUsers[pgUser.Name] = pgUser
	}

	pgSyncRequests := c.userSyncStrategy.ProduceSyncRequests(dbUsers, newUsers)
	if err = c.userSyncStrategy.ExecuteSyncRequests(pgSyncRequests, c.pgDb); err != nil {
		return fmt.Errorf("error executing sync statements: %v", err)
	}
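The DBUSERS label above drives a nested-loop-with-early-exit pattern that is easy to get wrong. A self-contained sketch of the same idea, using simplified stand-in types rather than the operator's own:

package main

import "fmt"

type pgUser struct {
	Rotated    bool
	MemberOf   []string
	Parameters map[string]string
}

func main() {
	// dbUsers maps role name to its rolconfig parameters read from Postgres.
	dbUsers := map[string]map[string]string{
		"appowner": {"search_path": "app"},
	}
	pgUsers := map[string]pgUser{
		"appowner230115": {Rotated: true, MemberOf: []string{"appowner"}},
	}

DBUSERS:
	for dbName, params := range dbUsers {
		for name, u := range pgUsers {
			if u.Rotated && u.MemberOf[0] == dbName {
				// copy the group role's rolconfig to the rotation user
				u.Parameters = params
				pgUsers[name] = u
				// deleting the key currently being ranged over is safe in Go
				delete(dbUsers, dbName)
				continue DBUSERS // skip straight to the next database role
			}
		}
	}
	fmt.Println(pgUsers["appowner230115"].Parameters) // map[search_path:app]
}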
@@ -907,7 +1076,7 @@ func (c *Cluster) syncDatabases() error {
	for preparedDatabaseName := range c.Spec.PreparedDatabases {
		_, exists := currentDatabases[preparedDatabaseName]
		if !exists {
			createDatabases[preparedDatabaseName] = preparedDatabaseName + constants.OwnerRoleNameSuffix
			createDatabases[preparedDatabaseName] = fmt.Sprintf("%s%s", preparedDatabaseName, constants.OwnerRoleNameSuffix)
			preparedDatabases = append(preparedDatabases, preparedDatabaseName)
		}
	}

@@ -1008,9 +1177,9 @@ func (c *Cluster) syncPreparedSchemas(databaseName string, preparedSchemas map[s
	if createPreparedSchemas, equal := util.SubstractStringSlices(schemas, currentSchemas); !equal {
		for _, schemaName := range createPreparedSchemas {
			owner := constants.OwnerRoleNameSuffix
			dbOwner := databaseName + owner
			dbOwner := fmt.Sprintf("%s%s", databaseName, owner)
			if preparedSchemas[schemaName].DefaultRoles == nil || *preparedSchemas[schemaName].DefaultRoles {
				owner = databaseName + "_" + schemaName + owner
				owner = fmt.Sprintf("%s_%s%s", databaseName, schemaName, owner)
			} else {
				owner = dbOwner
			}
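The string concatenations replaced by fmt.Sprintf above encode a naming convention for owner roles. A hedged illustration, assuming constants.OwnerRoleNameSuffix is the "_owner" suffix the prepared-databases feature documents:

package main

import "fmt"

// assumption: mirrors constants.OwnerRoleNameSuffix in the operator
const ownerRoleNameSuffix = "_owner"

func main() {
	db, schema := "bar", "data"
	// database-level owner role
	fmt.Println(fmt.Sprintf("%s%s", db, ownerRoleNameSuffix)) // bar_owner
	// schema-level owner role when default roles are enabled for the schema
	fmt.Println(fmt.Sprintf("%s_%s%s", db, schema, ownerRoleNameSuffix)) // bar_data_owner
}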
@@ -1059,8 +1228,8 @@ func (c *Cluster) syncExtensions(extensions map[string]string) error {

func (c *Cluster) syncLogicalBackupJob() error {
	var (
		job        *batchv1beta1.CronJob
		desiredJob *batchv1beta1.CronJob
		job        *batchv1.CronJob
		desiredJob *batchv1.CronJob
		err        error
	)
	c.setProcessName("syncing the logical backup job")
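The batchv1beta1-to-batchv1 switch above tracks Kubernetes itself: the batch/v1 CronJob API is GA since 1.21, and the v1beta1 version is removed in 1.25. A minimal sketch of declaring a CronJob against the new group (the object name is illustrative; the schedule matches the operator's documented default):

package main

import (
	"fmt"

	batchv1 "k8s.io/api/batch/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func main() {
	// A bare CronJob object from the GA batch/v1 group; a real job would
	// also fill in the JobTemplate with the backup pod spec.
	job := batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "logical-backup-demo"},
		Spec: batchv1.CronJobSpec{
			Schedule: "30 00 * * *",
		},
	}
	fmt.Println(job.Name, job.Spec.Schedule)
}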
@@ -20,7 +20,9 @@ import (
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	fakeacidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/fake"
	"github.com/zalando/postgres-operator/pkg/spec"
	"github.com/zalando/postgres-operator/pkg/util"
	"github.com/zalando/postgres-operator/pkg/util/config"
	"github.com/zalando/postgres-operator/pkg/util/constants"
	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
	"github.com/zalando/postgres-operator/pkg/util/patroni"
	"k8s.io/client-go/kubernetes/fake"
@@ -143,24 +145,34 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
	client, _ := newFakeK8sSyncClient()
	clusterName := "acid-test-cluster"
	namespace := "default"
	testSlots := map[string]map[string]string{
		"slot1": {
			"type":     "logical",
			"plugin":   "wal2json",
			"database": "foo",
		},
	}

	ctrl := gomock.NewController(t)
	defer ctrl.Finish()

	defaultPgParameters := map[string]string{
		"log_min_duration_statement": "200",
		"max_connections":            "50",
	}
	defaultPatroniParameters := acidv1.Patroni{
		TTL: 20,
	}

	pg := acidv1.Postgresql{
		ObjectMeta: metav1.ObjectMeta{
			Name:      clusterName,
			Namespace: namespace,
		},
		Spec: acidv1.PostgresSpec{
			Patroni: acidv1.Patroni{
				TTL: 20,
			},
			Patroni: defaultPatroniParameters,
			PostgresqlParam: acidv1.PostgresqlParam{
				Parameters: map[string]string{
					"log_min_duration_statement": "200",
					"max_connections":            "50",
				},
				Parameters: defaultPgParameters,
			},
			Volume: acidv1.Volume{
				Size: "1Gi",

@@ -206,9 +218,24 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
	tests := []struct {
		subtest         string
		patroni         acidv1.Patroni
		desiredSlots    map[string]map[string]string
		removedSlots    map[string]map[string]string
		pgParams        map[string]string
		restartMaster   bool
		shouldBePatched bool
		restartPrimary  bool
	}{
		{
			subtest: "Patroni and Postgresql.Parameters do not differ",
			patroni: acidv1.Patroni{
				TTL: 20,
			},
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "50",
			},
			shouldBePatched: false,
			restartPrimary:  false,
		},
		{
			subtest: "Patroni and Postgresql.Parameters differ - restart replica first",
			patroni: acidv1.Patroni{
@@ -218,48 +245,237 @@ func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
				"log_min_duration_statement": "500", // desired 200
				"max_connections":            "100", // desired 50
			},
			restartMaster:   false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest: "multiple Postgresql.Parameters differ - restart replica first",
			patroni: acidv1.Patroni{
				TTL: 20,
			},
			patroni: defaultPatroniParameters,
			pgParams: map[string]string{
				"log_min_duration_statement": "500", // desired 200
				"max_connections":            "100", // desired 50
			},
			restartMaster:   false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest: "desired max_connections bigger - restart replica first",
			patroni: acidv1.Patroni{
				TTL: 20,
			},
			patroni: defaultPatroniParameters,
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "30", // desired 50
			},
			restartMaster:   false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest: "desired max_connections smaller - restart master first",
			patroni: acidv1.Patroni{
				TTL: 20,
			},
			patroni: defaultPatroniParameters,
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "100", // desired 50
			},
			restartMaster:   true,
			shouldBePatched: true,
			restartPrimary:  true,
		},
		{
			subtest: "slot does not exist but is desired",
			patroni: acidv1.Patroni{
				TTL: 20,
			},
			desiredSlots: testSlots,
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "50",
			},
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest: "slot exist, nothing specified in manifest",
			patroni: acidv1.Patroni{
				TTL: 20,
				Slots: map[string]map[string]string{
					"slot1": {
						"type":     "logical",
						"plugin":   "pgoutput",
						"database": "foo",
					},
				},
			},
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "50",
			},
			shouldBePatched: false,
			restartPrimary:  false,
		},
		{
			subtest: "slot is removed from manifest",
			patroni: acidv1.Patroni{
				TTL: 20,
				Slots: map[string]map[string]string{
					"slot1": {
						"type":     "logical",
						"plugin":   "pgoutput",
						"database": "foo",
					},
				},
			},
			removedSlots: testSlots,
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "50",
			},
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest: "slot plugin differs",
			patroni: acidv1.Patroni{
				TTL: 20,
				Slots: map[string]map[string]string{
					"slot1": {
						"type":     "logical",
						"plugin":   "pgoutput",
						"database": "foo",
					},
				},
			},
			desiredSlots: testSlots,
			pgParams: map[string]string{
				"log_min_duration_statement": "200",
				"max_connections":            "50",
			},
			shouldBePatched: true,
			restartPrimary:  false,
		},
	}

	for _, tt := range tests {
		requireMasterRestart, err := cluster.checkAndSetGlobalPostgreSQLConfiguration(mockPod, tt.patroni, cluster.Spec.Patroni, tt.pgParams, cluster.Spec.Parameters)
		if len(tt.desiredSlots) > 0 {
			cluster.Spec.Patroni.Slots = tt.desiredSlots
		}
		if len(tt.removedSlots) > 0 {
			for slotName, removedSlot := range tt.removedSlots {
				cluster.replicationSlots[slotName] = removedSlot
			}
		}

		configPatched, requirePrimaryRestart, err := cluster.checkAndSetGlobalPostgreSQLConfiguration(mockPod, tt.patroni, cluster.Spec.Patroni, tt.pgParams, cluster.Spec.Parameters)
		assert.NoError(t, err)
		if requireMasterRestart != tt.restartMaster {
			t.Errorf("%s - %s: unexpect master restart strategy, got %v, expected %v", testName, tt.subtest, requireMasterRestart, tt.restartMaster)
		if configPatched != tt.shouldBePatched {
			t.Errorf("%s - %s: expected config update did not happen", testName, tt.subtest)
		}
		if requirePrimaryRestart != tt.restartPrimary {
			t.Errorf("%s - %s: wrong master restart strategy, got restart %v, expected restart %v", testName, tt.subtest, requirePrimaryRestart, tt.restartPrimary)
		}

		// reset slots for next tests
		cluster.Spec.Patroni.Slots = nil
		cluster.replicationSlots = make(map[string]interface{})
	}

	testsFailsafe := []struct {
		subtest         string
		operatorVal     *bool
		effectiveVal    *bool
		desiredVal      bool
		shouldBePatched bool
		restartPrimary  bool
	}{
		{
			subtest:         "Not set in operator config, not set for pg cluster. Set to true in the pg config.",
			operatorVal:     nil,
			effectiveVal:    nil,
			desiredVal:      true,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Not set in operator config, disabled for pg cluster. Set to true in the pg config.",
			operatorVal:     nil,
			effectiveVal:    util.False(),
			desiredVal:      true,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Not set in operator config, not set for pg cluster. Set to false in the pg config.",
			operatorVal:     nil,
			effectiveVal:    nil,
			desiredVal:      false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Not set in operator config, enabled for pg cluster. Set to false in the pg config.",
			operatorVal:     nil,
			effectiveVal:    util.True(),
			desiredVal:      false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Enabled in operator config, not set for pg cluster. Set to false in the pg config.",
			operatorVal:     util.True(),
			effectiveVal:    nil,
			desiredVal:      false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Enabled in operator config, disabled for pg cluster. Set to true in the pg config.",
			operatorVal:     util.True(),
			effectiveVal:    util.False(),
			desiredVal:      true,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Disabled in operator config, not set for pg cluster. Set to true in the pg config.",
			operatorVal:     util.False(),
			effectiveVal:    nil,
			desiredVal:      true,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Disabled in operator config, enabled for pg cluster. Set to false in the pg config.",
			operatorVal:     util.False(),
			effectiveVal:    util.True(),
			desiredVal:      false,
			shouldBePatched: true,
			restartPrimary:  false,
		},
		{
			subtest:         "Disabled in operator config, enabled for pg cluster. Set to true in the pg config.",
			operatorVal:     util.False(),
			effectiveVal:    util.True(),
			desiredVal:      true,
			shouldBePatched: false, // should not require patching
			restartPrimary:  false,
		},
	}

	for _, tt := range testsFailsafe {
		patroniConf := defaultPatroniParameters

		if tt.operatorVal != nil {
			cluster.OpConfig.EnablePatroniFailsafeMode = tt.operatorVal
		}
		if tt.effectiveVal != nil {
			patroniConf.FailsafeMode = tt.effectiveVal
		}
		cluster.Spec.Patroni.FailsafeMode = &tt.desiredVal

		configPatched, requirePrimaryRestart, err := cluster.checkAndSetGlobalPostgreSQLConfiguration(mockPod, patroniConf, cluster.Spec.Patroni, defaultPgParameters, cluster.Spec.Parameters)
		assert.NoError(t, err)
		if configPatched != tt.shouldBePatched {
			t.Errorf("%s - %s: expected update went wrong", testName, tt.subtest)
		}
		if requirePrimaryRestart != tt.restartPrimary {
			t.Errorf("%s - %s: wrong master restart strategy, got restart %v, expected restart %v", testName, tt.subtest, requirePrimaryRestart, tt.restartPrimary)
		}
	}
}
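One way to read the failsafe table: the manifest value is always compared against what Patroni effectively runs with, and the operator-level flag only acts as a default when the manifest leaves it unset. A sketch of that precedence, not the operator's actual resolution code (the false fallback matches the util.False() default set in importConfigurationFromCRD later in this diff):

package main

import "fmt"

// resolveFailsafe: manifest wins over the operator default; both are
// tri-state (*bool), where nil means "not set".
func resolveFailsafe(operatorDefault, manifest *bool) bool {
	if manifest != nil {
		return *manifest
	}
	if operatorDefault != nil {
		return *operatorDefault
	}
	return false // assumed global default for enable_patroni_failsafe_mode
}

func main() {
	enabled, disabled := true, false
	fmt.Println(resolveFailsafe(&disabled, &enabled)) // manifest wins: true
	fmt.Println(resolveFailsafe(&enabled, nil))       // operator default: true
}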
@@ -273,7 +489,6 @@ func TestUpdateSecret(t *testing.T) {
	dbname := "app"
	dbowner := "appowner"
	secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
	rotationUsers := make(spec.PgUserMap)
	retentionUsers := make([]string, 0)

	// define manifest users and enable rotation for dbowner

@@ -286,6 +501,17 @@ func TestUpdateSecret(t *testing.T) {
			Databases:                      map[string]string{dbname: dbowner},
			Users:                          map[string]acidv1.UserFlags{"foo": {}, dbowner: {}},
			UsersWithInPlaceSecretRotation: []string{dbowner},
			Streams: []acidv1.Stream{
				{
					ApplicationId: appId,
					Database:      dbname,
					Tables: map[string]acidv1.StreamTable{
						"data.foo": acidv1.StreamTable{
							EventType: "stream-type-b",
						},
					},
				},
			},
			Volume: acidv1.Volume{
				Size: "1Gi",
			},

@@ -297,6 +523,8 @@ func TestUpdateSecret(t *testing.T) {
		Config{
			OpConfig: config.Config{
				Auth: config.Auth{
					SuperUsername:            "postgres",
					ReplicationUsername:      "standby",
					SecretNameTemplate:       secretTemplate,
					EnablePasswordRotation:   true,
					PasswordRotationInterval: 1,

@@ -312,8 +540,9 @@ func TestUpdateSecret(t *testing.T) {
	cluster.Name = clusterName
	cluster.Namespace = namespace
	cluster.pgUsers = map[string]spec.PgUser{}
	cluster.initRobotUsers()

	// init all users
	cluster.initUsers()
	// create secrets
	cluster.syncSecrets()
	// initialize rotation with current time

@@ -321,22 +550,33 @@ func TestUpdateSecret(t *testing.T) {

	dayAfterTomorrow := time.Now().AddDate(0, 0, 2)

	for username := range cluster.Spec.Users {
		pgUser := cluster.pgUsers[username]
	allUsers := make(map[string]spec.PgUser)
	for _, pgUser := range cluster.pgUsers {
		allUsers[pgUser.Name] = pgUser
	}
	for _, systemUser := range cluster.systemUsers {
		allUsers[systemUser.Name] = systemUser
	}

	for username, pgUser := range allUsers {
		// first, get the secret
		secret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretTemplate.Format("username", username, "cluster", clusterName), metav1.GetOptions{})
		secretName := cluster.credentialSecretName(username)
		secret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
		assert.NoError(t, err)
		secretPassword := string(secret.Data["password"])

		// now update the secret setting a next rotation date (tomorrow + interval)
		cluster.updateSecret(username, secret, &rotationUsers, &retentionUsers, dayAfterTomorrow)
		updatedSecret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretTemplate.Format("username", username, "cluster", clusterName), metav1.GetOptions{})
		cluster.updateSecret(username, secret, &retentionUsers, dayAfterTomorrow)
		updatedSecret, err := cluster.KubeClient.Secrets(namespace).Get(context.TODO(), secretName, metav1.GetOptions{})
		assert.NoError(t, err)

		// check that passwords are different
		rotatedPassword := string(updatedSecret.Data["password"])
		if secretPassword == rotatedPassword {
			// passwords for system users should not have been rotated
			if pgUser.Origin != spec.RoleOriginManifest {
				continue
			}
			t.Errorf("%s: password unchanged in updated secret for %s", testName, username)
		}

@@ -354,13 +594,13 @@ func TestUpdateSecret(t *testing.T) {
			t.Errorf("%s: username differs in updated secret: expected %s, got %s", testName, username, secretUsername)
		}
	} else {
		rotatedUsername := username + dayAfterTomorrow.Format("060102")
		rotatedUsername := username + dayAfterTomorrow.Format(constants.RotationUserDateFormat)
		if secretUsername != rotatedUsername {
			t.Errorf("%s: updated secret does not contain correct username: expected %s, got %s", testName, rotatedUsername, secretUsername)
		}

		if len(rotationUsers) != 1 && len(retentionUsers) != 1 {
			t.Errorf("%s: unexpected number of users to rotate - expected only %s, found %d", testName, username, len(rotationUsers))
		// whenever there's a rotation the retentionUsers list is extended or updated
		if len(retentionUsers) != 1 {
			t.Errorf("%s: unexpected number of users to drop - expected only %s, found %d", testName, username, len(retentionUsers))
		}
	}
}
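The tests above resolve secret names through config.StringTemplate with the "{username}.{cluster}.credentials" pattern. A simplified stand-in for that expansion (the real Format lives in pkg/util/config and may normalize further; this sketch only performs the placeholder substitution):

package main

import (
	"fmt"
	"strings"
)

// format replaces {key} placeholders with their paired values.
func format(tmpl string, kv ...string) string {
	out := tmpl
	for i := 0; i+1 < len(kv); i += 2 {
		out = strings.ReplaceAll(out, "{"+kv[i]+"}", kv[i+1])
	}
	return out
}

func main() {
	fmt.Println(format("{username}.{cluster}.credentials",
		"username", "appowner", "cluster", "acid-test-cluster"))
	// appowner.acid-test-cluster.credentials
}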
@@ -6,7 +6,7 @@ import (
	acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	policybeta1 "k8s.io/api/policy/v1beta1"
	policyv1 "k8s.io/api/policy/v1"
	"k8s.io/apimachinery/pkg/types"
)

@@ -59,12 +59,13 @@ type WorkerStatus struct {
type ClusterStatus struct {
	Team                string
	Cluster             string
	Namespace           string
	MasterService       *v1.Service
	ReplicaService      *v1.Service
	MasterEndpoint      *v1.Endpoints
	ReplicaEndpoint     *v1.Endpoints
	StatefulSet         *appsv1.StatefulSet
	PodDisruptionBudget *policybeta1.PodDisruptionBudget
	PodDisruptionBudget *policyv1.PodDisruptionBudget

	CurrentProcess Process
	Worker         uint32
@@ -14,7 +14,7 @@ import (

	appsv1 "k8s.io/api/apps/v1"
	v1 "k8s.io/api/core/v1"
	policybeta1 "k8s.io/api/policy/v1beta1"
	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/labels"

@@ -159,7 +159,7 @@ func metaAnnotationsPatch(annotations map[string]string) ([]byte, error) {
	}{&meta})
}

func (c *Cluster) logPDBChanges(old, new *policybeta1.PodDisruptionBudget, isUpdate bool, reason string) {
func (c *Cluster) logPDBChanges(old, new *policyv1.PodDisruptionBudget, isUpdate bool, reason string) {
	if isUpdate {
		c.logger.Infof("pod disruption budget %q has been changed", util.NameFromMeta(old.ObjectMeta))
	} else {

@@ -244,7 +244,12 @@ func getPostgresContainer(podSpec *v1.PodSpec) (pgContainer v1.Container) {
func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {

	if teamID == "" {
		return nil, fmt.Errorf("no teamId specified")
		msg := "no teamId specified"
		if c.OpConfig.EnableTeamIdClusternamePrefix {
			return nil, fmt.Errorf(msg)
		}
		c.logger.Warnf(msg)
		return nil, nil
	}

	members := []string{}
@@ -500,16 +505,55 @@ func (c *Cluster) roleLabelsSet(shouldAddExtraLabels bool, role PostgresRole) la
	return lbls
}

func (c *Cluster) masterDNSName() string {
func (c *Cluster) dnsName(role PostgresRole) string {
	var dnsString, oldDnsString string

	if role == Master {
		dnsString = c.masterDNSName(c.Name)
	} else {
		dnsString = c.replicaDNSName(c.Name)
	}

	// if cluster name starts with teamID we might need to provide backwards compatibility
	clusterNameWithoutTeamPrefix, _ := acidv1.ExtractClusterName(c.Name, c.Spec.TeamID)
	if clusterNameWithoutTeamPrefix != "" {
		if role == Master {
			oldDnsString = c.oldMasterDNSName(clusterNameWithoutTeamPrefix)
		} else {
			oldDnsString = c.oldReplicaDNSName(clusterNameWithoutTeamPrefix)
		}
		dnsString = fmt.Sprintf("%s,%s", dnsString, oldDnsString)
	}

	return dnsString
}

func (c *Cluster) masterDNSName(clusterName string) string {
	return strings.ToLower(c.OpConfig.MasterDNSNameFormat.Format(
		"cluster", c.Spec.ClusterName,
		"cluster", clusterName,
		"namespace", c.Namespace,
		"team", c.teamName(),
		"hostedzone", c.OpConfig.DbHostedZone))
}

func (c *Cluster) replicaDNSName() string {
func (c *Cluster) replicaDNSName(clusterName string) string {
	return strings.ToLower(c.OpConfig.ReplicaDNSNameFormat.Format(
		"cluster", c.Spec.ClusterName,
		"cluster", clusterName,
		"namespace", c.Namespace,
		"team", c.teamName(),
		"hostedzone", c.OpConfig.DbHostedZone))
}

func (c *Cluster) oldMasterDNSName(clusterName string) string {
	return strings.ToLower(c.OpConfig.MasterLegacyDNSNameFormat.Format(
		"cluster", clusterName,
		"team", c.teamName(),
		"hostedzone", c.OpConfig.DbHostedZone))
}

func (c *Cluster) oldReplicaDNSName(clusterName string) string {
	return strings.ToLower(c.OpConfig.ReplicaLegacyDNSNameFormat.Format(
		"cluster", clusterName,
		"team", c.teamName(),
		"hostedzone", c.OpConfig.DbHostedZone))
}
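The new dnsName helper emits a comma-separated pair when the cluster name carries the teamID prefix, so consumers of the legacy DNS entry keep resolving. A standalone sketch of that behavior with illustrative name formats (the real formats come from MasterDNSNameFormat and the new legacy-format options, not the literals below):

package main

import (
	"fmt"
	"strings"
)

// dnsNames returns the new-format name and, when the cluster name starts
// with "<team>-", appends the legacy-format name after a comma.
func dnsNames(clusterName, team, namespace, zone string) string {
	names := fmt.Sprintf("%s.%s.%s", clusterName, namespace, zone)
	if trimmed := strings.TrimPrefix(clusterName, team+"-"); trimmed != clusterName {
		names = fmt.Sprintf("%s,%s.%s.%s", names, trimmed, team, zone)
	}
	return strings.ToLower(names)
}

func main() {
	fmt.Println(dnsNames("acid-test-cluster", "acid", "default", "db.example.com"))
	// acid-test-cluster.default.db.example.com,test-cluster.acid.db.example.com
}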
@@ -19,7 +19,7 @@ func newFakeK8sAnnotationsClient() (k8sutil.KubernetesClient, *k8sFake.Clientset
	acidClientSet := fakeacidv1.NewSimpleClientset()

	return k8sutil.KubernetesClient{
		PodDisruptionBudgetsGetter: clientSet.PolicyV1beta1(),
		PodDisruptionBudgetsGetter: clientSet.PolicyV1(),
		ServicesGetter:             clientSet.CoreV1(),
		StatefulSetsGetter:         clientSet.AppsV1(),
		PostgresqlsGetter:          acidClientSet.AcidV1(),
@@ -15,11 +15,11 @@
)

// ClusterStatus provides status of the cluster
func (c *Controller) ClusterStatus(team, namespace, cluster string) (*cluster.ClusterStatus, error) {
func (c *Controller) ClusterStatus(namespace, cluster string) (*cluster.ClusterStatus, error) {

	clusterName := spec.NamespacedName{
		Namespace: namespace,
		Name:      team + "-" + cluster,
		Name:      cluster,
	}

	c.clustersMu.RLock()

@@ -92,11 +92,11 @@ func (c *Controller) GetStatus() *spec.ControllerStatus {
}

// ClusterLogs dumps cluster ring logs
func (c *Controller) ClusterLogs(team, namespace, name string) ([]*spec.LogEntry, error) {
func (c *Controller) ClusterLogs(namespace, name string) ([]*spec.LogEntry, error) {

	clusterName := spec.NamespacedName{
		Namespace: namespace,
		Name:      team + "-" + name,
		Name:      name,
	}

	c.clustersMu.RLock()

@@ -215,11 +215,11 @@ func (c *Controller) WorkerStatus(workerID uint32) (*cluster.WorkerStatus, error
}

// ClusterHistory dumps history of cluster changes
func (c *Controller) ClusterHistory(team, namespace, name string) ([]*spec.Diff, error) {
func (c *Controller) ClusterHistory(namespace, name string) ([]*spec.Diff, error) {

	clusterName := spec.NamespacedName{
		Namespace: namespace,
		Name:      team + "-" + name,
		Name:      name,
	}

	c.clustersMu.RLock()
@@ -36,12 +36,14 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.EnableLazySpiloUpgrade = fromCRD.EnableLazySpiloUpgrade
	result.EnablePgVersionEnvVar = fromCRD.EnablePgVersionEnvVar
	result.EnableSpiloWalPathCompat = fromCRD.EnableSpiloWalPathCompat
	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
	result.EtcdHost = fromCRD.EtcdHost
	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "registry.opensource.zalan.do/acid/spilo-14:2.1-p6")
	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-15:2.1-p9")
	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
	result.MinInstances = fromCRD.MinInstances
	result.MaxInstances = fromCRD.MaxInstances
	result.IgnoreInstanceLimitsAnnotationKey = fromCRD.IgnoreInstanceLimitsAnnotationKey
	result.ResyncPeriod = util.CoalesceDuration(time.Duration(fromCRD.ResyncPeriod), "30m")
	result.RepairPeriod = util.CoalesceDuration(time.Duration(fromCRD.RepairPeriod), "5m")
	result.SetMemoryRequestToLimit = fromCRD.SetMemoryRequestToLimit

@@ -60,8 +62,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	// major version upgrade config
	result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "off")
	result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "9.6")
	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "14")
	result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "11")
	result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "15")

	// kubernetes config
	result.CustomPodAnnotations = fromCRD.Kubernetes.CustomPodAnnotations

@@ -84,6 +86,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.StorageResizeMode = util.Coalesce(fromCRD.Kubernetes.StorageResizeMode, "pvc")
	result.EnableInitContainers = util.CoalesceBool(fromCRD.Kubernetes.EnableInitContainers, util.True())
	result.EnableSidecars = util.CoalesceBool(fromCRD.Kubernetes.EnableSidecars, util.True())
	result.SharePgSocketWithSidecars = util.CoalesceBool(fromCRD.Kubernetes.SharePgSocketWithSidecars, util.False())
	result.SecretNameTemplate = fromCRD.Kubernetes.SecretNameTemplate
	result.OAuthTokenSecretName = fromCRD.Kubernetes.OAuthTokenSecretName
	result.EnableCrossNamespaceSecret = fromCRD.Kubernetes.EnableCrossNamespaceSecret

@@ -116,9 +119,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.NodeReadinessLabelMerge = fromCRD.Kubernetes.NodeReadinessLabelMerge
	result.PodPriorityClassName = fromCRD.Kubernetes.PodPriorityClassName
	result.PodManagementPolicy = util.Coalesce(fromCRD.Kubernetes.PodManagementPolicy, "ordered_ready")
	result.EnableReadinessProbe = fromCRD.Kubernetes.EnableReadinessProbe
	result.MasterPodMoveTimeout = util.CoalesceDuration(time.Duration(fromCRD.Kubernetes.MasterPodMoveTimeout), "10m")
	result.EnablePodAntiAffinity = fromCRD.Kubernetes.EnablePodAntiAffinity
	result.PodAntiAffinityTopologyKey = util.Coalesce(fromCRD.Kubernetes.PodAntiAffinityTopologyKey, "kubernetes.io/hostname")
	result.PodAntiAffinityPreferredDuringScheduling = fromCRD.Kubernetes.PodAntiAffinityPreferredDuringScheduling
	result.PodToleration = fromCRD.Kubernetes.PodToleration

	// Postgres Pod resources

@@ -128,6 +133,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.DefaultMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.DefaultMemoryLimit, "500Mi")
	result.MinCPULimit = util.Coalesce(fromCRD.PostgresPodResources.MinCPULimit, "250m")
	result.MinMemoryLimit = util.Coalesce(fromCRD.PostgresPodResources.MinMemoryLimit, "250Mi")
	result.MaxCPURequest = fromCRD.PostgresPodResources.MaxCPURequest
	result.MaxMemoryRequest = fromCRD.PostgresPodResources.MaxMemoryRequest

	// timeout config
	result.ResourceCheckInterval = util.CoalesceDuration(time.Duration(fromCRD.Timeouts.ResourceCheckInterval), "3s")

@@ -147,7 +154,9 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.EnableReplicaPoolerLoadBalancer = fromCRD.LoadBalancer.EnableReplicaPoolerLoadBalancer
	result.CustomServiceAnnotations = fromCRD.LoadBalancer.CustomServiceAnnotations
	result.MasterDNSNameFormat = fromCRD.LoadBalancer.MasterDNSNameFormat
	result.MasterLegacyDNSNameFormat = fromCRD.LoadBalancer.MasterLegacyDNSNameFormat
	result.ReplicaDNSNameFormat = fromCRD.LoadBalancer.ReplicaDNSNameFormat
	result.ReplicaLegacyDNSNameFormat = fromCRD.LoadBalancer.ReplicaLegacyDNSNameFormat
	result.ExternalTrafficPolicy = util.Coalesce(fromCRD.LoadBalancer.ExternalTrafficPolicy, "Cluster")

	// AWS or GCP config

@@ -165,8 +174,11 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

	// logical backup config
	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.8.2")
	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "registry.opensource.zalan.do/acid/logical-backup:v1.9.0")
	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
	result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
	result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
	result.LogicalBackupAzureStorageContainer = fromCRD.LogicalBackup.AzureStorageContainer
	result.LogicalBackupS3Bucket = fromCRD.LogicalBackup.S3Bucket
	result.LogicalBackupS3Region = fromCRD.LogicalBackup.S3Region
	result.LogicalBackupS3Endpoint = fromCRD.LogicalBackup.S3Endpoint

@@ -176,6 +188,10 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.LogicalBackupS3RetentionTime = fromCRD.LogicalBackup.RetentionTime
	result.LogicalBackupGoogleApplicationCredentials = fromCRD.LogicalBackup.GoogleApplicationCredentials
	result.LogicalBackupJobPrefix = util.Coalesce(fromCRD.LogicalBackup.JobPrefix, "logical-backup-")
	result.LogicalBackupCPURequest = fromCRD.LogicalBackup.CPURequest
	result.LogicalBackupMemoryRequest = fromCRD.LogicalBackup.MemoryRequest
	result.LogicalBackupCPULimit = fromCRD.LogicalBackup.CPULimit
	result.LogicalBackupMemoryLimit = fromCRD.LogicalBackup.MemoryLimit

	// debug config
	result.DebugLogging = fromCRD.OperatorDebug.DebugLogging

@@ -211,6 +227,9 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
	result.ScalyrCPULimit = fromCRD.Scalyr.ScalyrCPULimit
	result.ScalyrMemoryLimit = fromCRD.Scalyr.ScalyrMemoryLimit

	// Patroni config
	result.EnablePatroniFailsafeMode = util.CoalesceBool(fromCRD.Patroni.FailsafeMode, util.False())

	// Connection pooler. Looks like we can't use defaulting in CRD before 1.17,
	// so ensure default values here.
	result.ConnectionPooler.NumberOfInstances = util.CoalesceInt32(
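Nearly every line of importConfigurationFromCRD follows the same take-the-CRD-value-or-fall-back-to-a-default pattern via util.Coalesce and friends. A minimal sketch of that pattern for strings (the operator ships variants for uint32, int32, bool, and durations):

package main

import "fmt"

// coalesce returns val unless it is empty, in which case def is used.
func coalesce(val, def string) string {
	if val == "" {
		return def
	}
	return val
}

func main() {
	// unset in the CRD: the built-in default wins
	fmt.Println(coalesce("", "ghcr.io/zalando/spilo-15:2.1-p9"))
	// set in the CRD: the configured value wins
	fmt.Println(coalesce("my-registry/spilo:dev", "ghcr.io/zalando/spilo-15:2.1-p9"))
}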
@@ -158,7 +158,14 @@ func (c *Controller) acquireInitialListOfClusters() error {
	return nil
}

func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) *cluster.Cluster {
func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedName, pgSpec *acidv1.Postgresql) (*cluster.Cluster, error) {
	if c.opConfig.EnableTeamIdClusternamePrefix {
		if _, err := acidv1.ExtractClusterName(clusterName.Name, pgSpec.Spec.TeamID); err != nil {
			c.KubeClient.SetPostgresCRDStatus(clusterName, acidv1.ClusterStatusInvalid)
			return nil, err
		}
	}

	cl := cluster.New(c.makeClusterConfig(), c.KubeClient, *pgSpec, lg, c.eventRecorder)
	cl.Run(c.stopCh)
	teamName := strings.ToLower(cl.Spec.TeamID)

@@ -171,12 +178,13 @@ func (c *Controller) addCluster(lg *logrus.Entry, clusterName spec.NamespacedNam
	c.clusterLogs[clusterName] = ringlog.New(c.opConfig.RingLogLines)
	c.clusterHistory[clusterName] = ringlog.New(c.opConfig.ClusterHistoryEntries)

	return cl
	return cl, nil
}

func (c *Controller) processEvent(event ClusterEvent) {
	var clusterName spec.NamespacedName
	var clHistory ringlog.RingLogger
	var err error

	lg := c.logger.WithField("worker", event.WorkerID)

@@ -216,7 +224,7 @@ func (c *Controller) processEvent(event ClusterEvent) {
		c.mergeDeprecatedPostgreSQLSpecParameters(&event.NewSpec.Spec)
	}

	if err := c.submitRBACCredentials(event); err != nil {
	if err = c.submitRBACCredentials(event); err != nil {
		c.logger.Warnf("pods and/or Patroni may misfunction due to the lack of permissions: %v", err)
	}

@@ -231,15 +239,20 @@ func (c *Controller) processEvent(event ClusterEvent) {

		lg.Infof("creating a new Postgres cluster")

		cl = c.addCluster(lg, clusterName, event.NewSpec)
		cl, err = c.addCluster(lg, clusterName, event.NewSpec)
		if err != nil {
			lg.Errorf("creation of cluster is blocked: %v", err)
			return
		}

		c.curWorkerCluster.Store(event.WorkerID, cl)

		if err := cl.Create(); err != nil {
		err = cl.Create()
		if err != nil {
			cl.Status = acidv1.PostgresStatus{PostgresClusterStatus: acidv1.ClusterStatusInvalid}
			cl.Error = fmt.Sprintf("could not create cluster: %v", err)
			lg.Error(cl.Error)
			c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Create", "%v", cl.Error)

			return
		}

@@ -252,7 +265,8 @@ func (c *Controller) processEvent(event ClusterEvent) {
			return
		}
		c.curWorkerCluster.Store(event.WorkerID, cl)
		if err := cl.Update(event.OldSpec, event.NewSpec); err != nil {
		err = cl.Update(event.OldSpec, event.NewSpec)
		if err != nil {
			cl.Error = fmt.Sprintf("could not update cluster: %v", err)
			lg.Error(cl.Error)

@@ -303,11 +317,16 @@ func (c *Controller) processEvent(event ClusterEvent) {

		// no race condition because a cluster is always processed by single worker
		if !clusterFound {
			cl = c.addCluster(lg, clusterName, event.NewSpec)
			cl, err = c.addCluster(lg, clusterName, event.NewSpec)
			if err != nil {
				lg.Errorf("syncing of cluster is blocked: %v", err)
				return
			}
		}

		c.curWorkerCluster.Store(event.WorkerID, cl)
		if err := cl.Sync(event.NewSpec); err != nil {
		err = cl.Sync(event.NewSpec)
		if err != nil {
			cl.Error = fmt.Sprintf("could not sync cluster: %v", err)
			c.eventRecorder.Eventf(cl.GetReference(), v1.EventTypeWarning, "Sync", "%v", cl.Error)
			lg.Error(cl.Error)
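addCluster now refuses clusters whose name lacks the "{teamId}-" prefix when EnableTeamIdClusternamePrefix is on, marking the CRD status invalid instead of running the cluster. A simplified stand-in for the acidv1.ExtractClusterName check (the real validator may normalize case and edge cases differently):

package main

import (
	"fmt"
	"strings"
)

// extractClusterName strips the "<teamID>-" prefix or reports a violation.
func extractClusterName(clusterName, teamID string) (string, error) {
	prefix := strings.ToLower(teamID) + "-"
	if !strings.HasPrefix(strings.ToLower(clusterName), prefix) {
		return "", fmt.Errorf("name must match {TEAM}-{NAME} format")
	}
	return clusterName[len(prefix):], nil
}

func main() {
	fmt.Println(extractClusterName("acid-test-cluster", "acid")) // test-cluster <nil>
	fmt.Println(extractClusterName("test-cluster", "acid"))      // "" error
}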
@@ -1,5 +1,5 @@
/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -26,6 +26,7 @@ package versioned

import (
	"fmt"
	"net/http"

	acidv1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/acid.zalan.do/v1"
	zalandov1 "github.com/zalando/postgres-operator/pkg/generated/clientset/versioned/typed/zalando.org/v1"

@@ -69,26 +70,45 @@ func (c *Clientset) Discovery() discovery.DiscoveryInterface {
// NewForConfig creates a new Clientset for the given config.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfig will generate a rate-limiter in configShallowCopy.
// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient),
// where httpClient was generated with rest.HTTPClientFor(c).
func NewForConfig(c *rest.Config) (*Clientset, error) {
	configShallowCopy := *c

	// share the transport between all clients
	httpClient, err := rest.HTTPClientFor(&configShallowCopy)
	if err != nil {
		return nil, err
	}

	return NewForConfigAndClient(&configShallowCopy, httpClient)
}

// NewForConfigAndClient creates a new Clientset for the given config and http client.
// Note the http client provided takes precedence over the configured transport values.
// If config's RateLimiter is not set and QPS and Burst are acceptable,
// NewForConfigAndClient will generate a rate-limiter in configShallowCopy.
func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) {
	configShallowCopy := *c
	if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 {
		if configShallowCopy.Burst <= 0 {
			return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0")
		}
		configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst)
	}

	var cs Clientset
	var err error
	cs.acidV1, err = acidv1.NewForConfig(&configShallowCopy)
	cs.acidV1, err = acidv1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}
	cs.zalandoV1, err = zalandov1.NewForConfig(&configShallowCopy)
	cs.zalandoV1, err = zalandov1.NewForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}

	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy)
	cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient)
	if err != nil {
		return nil, err
	}

@@ -98,12 +118,11 @@ func NewForConfig(c *rest.Config) (*Clientset, error) {
// NewForConfigOrDie creates a new Clientset for the given config and
// panics if there is an error in the config.
func NewForConfigOrDie(c *rest.Config) *Clientset {
	var cs Clientset
	cs.acidV1 = acidv1.NewForConfigOrDie(c)
	cs.zalandoV1 = zalandov1.NewForConfigOrDie(c)

	cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c)
	return &cs
	cs, err := NewForConfig(c)
	if err != nil {
		panic(err)
	}
	return cs
}

// New creates a new Clientset for the given RESTClient.
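The regenerated clientset now funnels all typed clients and discovery through one shared *http.Client built via rest.HTTPClientFor, rather than each client opening its own transport. A sketch of obtaining that shared client with client-go (the kubeconfig path is illustrative):

package main

import (
	"fmt"

	"k8s.io/client-go/rest"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	cfg, err := clientcmd.BuildConfigFromFlags("", "/tmp/kubeconfig")
	if err != nil {
		panic(err)
	}
	// One http.Client per rest.Config; hand the same instance to every
	// NewForConfigAndClient constructor so all API groups share a single
	// transport and connection pool.
	httpClient, err := rest.HTTPClientFor(cfg)
	if err != nil {
		panic(err)
	}
	fmt.Printf("shared transport ready: %T\n", httpClient.Transport)
}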
@@ -1,5 +1,5 @@
/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
/*
Copyright 2022 Compose, Zalando SE
Copyright 2023 Compose, Zalando SE

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
Some files were not shown because too many files have changed in this diff.