Compare commits

master...v1.13.0

No commits in common. "master" and "v1.13.0" have entirely different histories.

143 changed files with 1225 additions and 2862 deletions
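In the diffs below, `-` lines show the state on `master` and `+` lines the state at `v1.13.0`.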

@@ -23,7 +23,7 @@ jobs:
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.25.3"
+          go-version: "^1.22.5"
       - name: Run unit tests
         run: make deps mocks test

@@ -14,7 +14,7 @@ jobs:
       - uses: actions/checkout@v1
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.25.3"
+          go-version: "^1.22.5"
       - name: Make dependencies
         run: make deps mocks
       - name: Code generation

@@ -14,7 +14,7 @@ jobs:
       - uses: actions/checkout@v2
       - uses: actions/setup-go@v2
         with:
-          go-version: "^1.25.3"
+          go-version: "^1.22.5"
       - name: Make dependencies
         run: make deps mocks
       - name: Compile
@@ -22,7 +22,7 @@ jobs:
       - name: Run unit tests
         run: go test -race -covermode atomic -coverprofile=coverage.out ./...
       - name: Convert coverage to lcov
-        uses: jandelgado/gcov2lcov-action@v1.1.1
+        uses: jandelgado/gcov2lcov-action@v1.0.9
       - name: Coveralls
         uses: coverallsapp/github-action@master
         with:

.gitignore

@@ -104,5 +104,3 @@ e2e/tls
 mocks
 ui/.npm/
-
-.DS_Store

@@ -1,2 +1,2 @@
 # global owners
-* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet
+* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @macedigital

@@ -1,6 +1,6 @@
 The MIT License (MIT)

-Copyright (c) 2025 Zalando SE
+Copyright (c) 2024 Zalando SE

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -4,3 +4,4 @@ Jan Mussler <jan.mussler@zalando.de>
 Jociele Padilha <jociele.padilha@zalando.de>
 Ida Novindasari <ida.novindasari@zalando.de>
 Polina Bungina <polina.bungina@zalando.de>
+Matthias Adler <matthias.adler@zalando.de>

@@ -43,7 +43,7 @@ ifndef GOPATH
 endif

 PATH := $(GOPATH)/bin:$(PATH)
-SHELL := env PATH="$(PATH)" $(SHELL)
+SHELL := env PATH=$(PATH) $(SHELL)

 default: local
@@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE}
 	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" .

 indocker-race:
-	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.25.3 bash -c "make linux"
+	docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.22.5 bash -c "make linux"

 push:
 	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
@@ -78,7 +78,7 @@ mocks:
 	GO111MODULE=on go generate ./...

 tools:
-	GO111MODULE=on go get k8s.io/client-go@kubernetes-1.32.9
+	GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.28.12
 	GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0
 	GO111MODULE=on go mod tidy

@@ -17,7 +17,6 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as
 * Live volume resize without pod restarts (AWS EBS, PVC)
 * Database connection pooling with PGBouncer
 * Support fast in place major version upgrade. Supports global upgrade of all clusters.
-* Pod protection during boostrap phase and configurable maintenance windows
 * Restore and cloning Postgres clusters on AWS, GCS and Azure
 * Additionally logical backups to S3 or GCS bucket can be configured
 * Standby cluster from S3 or GCS WAL archive
@@ -29,30 +28,25 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as

 ### PostgreSQL features

-* Supports PostgreSQL 17, starting from 13+
+* Supports PostgreSQL 16, starting from 12+
 * Streaming replication cluster via Patroni
 * Point-In-Time-Recovery with
-  [pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) /
-  [WAL-G](https://github.com/wal-g/wal-g) or [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
+  [pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) /
+  [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo)
 * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon),
-  [pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html),
+  [pg_stat_statements](https://www.postgresql.org/docs/16/pgstatstatements.html),
   [pgextwlist](https://github.com/dimitri/pgextwlist),
   [pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon)
 * Incl. popular Postgres extensions such as
   [decoderbufs](https://github.com/debezium/postgres-decoderbufs),
   [hypopg](https://github.com/HypoPG/hypopg),
   [pg_cron](https://github.com/citusdata/pg_cron),
-  [pg_repack](https://github.com/reorg/pg_repack),
   [pg_partman](https://github.com/pgpartman/pg_partman),
   [pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache),
-  [pg_audit](https://github.com/pgaudit/pgaudit),
-  [pgfaceting](https://github.com/cybertec-postgresql/pgfaceting),
   [pgq](https://github.com/pgq/pgq),
   [pgvector](https://github.com/pgvector/pgvector),
   [plpgsql_check](https://github.com/okbob/plpgsql_check),
-  [plproxy](https://github.com/plproxy/plproxy),
   [postgis](https://postgis.net/),
-  [roaringbitmap](https://github.com/ChenHuajun/pg_roaringbitmap),
   [set_user](https://github.com/pgaudit/set_user) and
   [timescaledb](https://github.com/timescale/timescaledb)

@@ -63,12 +57,12 @@ production for over five years.

 | Release   | Postgres versions | K8s versions      | Golang  |
 | :-------- | :---------------: | :---------------: | :-----: |
-| v1.15.0   | 13 &rarr; 17      | 1.27+             | 1.25.3  |
-| v1.14.0   | 13 &rarr; 17      | 1.27+             | 1.23.4  |
 | v1.13.0   | 12 &rarr; 16      | 1.27+             | 1.22.5  |
 | v1.12.0   | 11 &rarr; 16      | 1.27+             | 1.22.3  |
 | v1.11.0   | 11 &rarr; 16      | 1.27+             | 1.21.7  |
 | v1.10.1   | 10 &rarr; 15      | 1.21+             | 1.19.8  |
+| v1.9.0    | 10 &rarr; 15      | 1.21+             | 1.18.9  |
+| v1.8.2    | 9.5 &rarr; 14     | 1.20 &rarr; 1.24  | 1.17.4  |

 ## Getting started

@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator-ui
-version: 1.15.0
-appVersion: 1.15.0
+version: 1.13.0
+appVersion: 1.13.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience
 keywords:

@@ -1,55 +1,9 @@
 apiVersion: v1
 entries:
   postgres-operator-ui:
-  - apiVersion: v2
-    appVersion: 1.15.0
-    created: "2025-10-16T11:34:57.912432565+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: d82b5fb7c3d4fd8b106343b2f9472cba5e6050315ab3c520a79366f2b2f20c7a
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.15.0.tgz
-    version: 1.15.0
-  - apiVersion: v2
-    appVersion: 1.14.0
-    created: "2025-10-16T11:34:57.906677165+02:00"
-    description: Postgres Operator UI provides a graphical interface for a convenient
-      database-as-a-service user experience
-    digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - ui
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator-ui
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-ui-1.14.0.tgz
-    version: 1.14.0
   - apiVersion: v2
     appVersion: 1.13.0
-    created: "2025-10-16T11:34:57.904106882+02:00"
+    created: "2024-08-21T18:55:36.524305158+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8
@@ -72,7 +26,7 @@ entries:
     version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2025-10-16T11:34:57.901526106+02:00"
+    created: "2024-08-21T18:55:36.521875733+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd
@@ -95,7 +49,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2025-10-16T11:34:57.898843691+02:00"
+    created: "2024-08-21T18:55:36.51959105+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2
@@ -118,7 +72,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2025-10-16T11:34:57.896283083+02:00"
+    created: "2024-08-21T18:55:36.516518177+02:00"
     description: Postgres Operator UI provides a graphical interface for a convenient
       database-as-a-service user experience
     digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce
@@ -139,4 +93,27 @@ entries:
     urls:
     - postgres-operator-ui-1.10.1.tgz
     version: 1.10.1
-generated: "2025-10-16T11:34:57.893034861+02:00"
+  - apiVersion: v2
+    appVersion: 1.9.0
+    created: "2024-08-21T18:55:36.52712908+02:00"
+    description: Postgres Operator UI provides a graphical interface for a convenient
+      database-as-a-service user experience
+    digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - ui
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator-ui
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-ui-1.9.0.tgz
+    version: 1.9.0
+generated: "2024-08-21T18:55:36.512456099+02:00"

@@ -9,7 +9,7 @@ metadata:
   name: {{ template "postgres-operator-ui.fullname" . }}
   namespace: {{ .Release.Namespace }}
 spec:
-  replicas: {{ .Values.replicaCount }}
+  replicas: 1
   selector:
     matchLabels:
       app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }}
@@ -84,11 +84,11 @@ spec:
                 "limit_iops": 16000,
                 "limit_throughput": 1000,
                 "postgresql_versions": [
-                  "17",
                   "16",
                   "15",
                   "14",
-                  "13"
+                  "13",
+                  "12"
                 ]
               }
         {{- if .Values.extraEnvs }}
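The master-side template reads the UI replica count from chart values, whereas v1.13.0 pins it to a single replica; a minimal values fragment (the count `2` is only illustrative):

```yaml
# Honored by the master-side template via {{ .Values.replicaCount }};
# ignored at v1.13.0, which hardcodes replicas: 1.
replicaCount: 2
```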

@@ -8,7 +8,7 @@ replicaCount: 1
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator-ui
-  tag: v1.15.0
+  tag: v1.13.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -62,6 +62,8 @@ podAnnotations:
 extraEnvs:
   []
   # Exemple of settings to make snapshot view working in the ui when using AWS
+  # - name: WALE_S3_ENDPOINT
+  #   value: https+path://s3.us-east-1.amazonaws.com:443
   # - name: SPILO_S3_BACKUP_PREFIX
   #   value: spilo/
   # - name: AWS_ACCESS_KEY_ID
@@ -81,6 +83,8 @@ extraEnvs:
   #       key: AWS_DEFAULT_REGION
   # - name: SPILO_S3_BACKUP_BUCKET
   #   value: <s3 bucket used by the operator>
+  # - name: "USE_AWS_INSTANCE_PROFILE"
+  #   value: "true"

 # configure UI service
 service:

@@ -1,7 +1,7 @@
 apiVersion: v2
 name: postgres-operator
-version: 1.15.0
-appVersion: 1.15.0
+version: 1.13.0
+appVersion: 1.13.0
 home: https://github.com/zalando/postgres-operator
 description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes
 keywords:

@@ -68,7 +68,7 @@ spec:
               type: string
             docker_image:
               type: string
-              default: "ghcr.io/zalando/spilo-17:4.0-p3"
+              default: "ghcr.io/zalando/spilo-16:3.3-p1"
             enable_crd_registration:
               type: boolean
               default: true
@@ -167,10 +167,10 @@ spec:
                   type: string
                 minimal_major_version:
                   type: string
-                  default: "13"
+                  default: "12"
                 target_major_version:
                   type: string
-                  default: "17"
+                  default: "16"
             kubernetes:
               type: object
               properties:
@@ -376,28 +376,28 @@ spec:
               properties:
                 default_cpu_limit:
                   type: string
-                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 default_cpu_request:
                   type: string
-                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 default_memory_limit:
                   type: string
-                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 default_memory_request:
                   type: string
-                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 max_cpu_request:
                   type: string
-                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 max_memory_request:
                   type: string
-                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 min_cpu_limit:
                   type: string
-                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$'
+                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 min_memory_limit:
                   type: string
-                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$'
+                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
             timeouts:
               type: object
               properties:
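The only change in the eight patterns above is the trailing `|^$` alternative on the master side, which additionally accepts an empty string. A hypothetical OperatorConfiguration fragment that master therefore validates but v1.13.0 rejects:

```yaml
configuration:
  postgres_pod_resources:
    default_cpu_limit: ""        # empty string only matches the master-side ^$ branch
    default_memory_limit: 500Mi  # a regular quantity matches under both versions
```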

@@ -375,11 +375,11 @@ spec:
               version:
                 type: string
                 enum:
+                - "12"
                 - "13"
                 - "14"
                 - "15"
                 - "16"
-                - "17"
               parameters:
                 type: object
                 additionalProperties:
@@ -514,9 +514,6 @@ spec:
                   type: string
                 batchSize:
                   type: integer
-                cpu:
-                  type: string
-                  pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
                 database:
                   type: string
                 enableRecovery:
@@ -525,9 +522,6 @@ spec:
                   type: object
                   additionalProperties:
                     type: string
-                memory:
-                  type: string
-                  pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
                 tables:
                   type: object
                   additionalProperties:
@@ -539,8 +533,6 @@ spec:
                       type: string
                     idColumn:
                       type: string
-                    ignoreRecovery:
-                      type: boolean
                     payloadColumn:
                       type: string
                     recoveryEventType:

@@ -1,53 +1,9 @@
 apiVersion: v1
 entries:
   postgres-operator:
-  - apiVersion: v2
-    appVersion: 1.15.0
-    created: "2025-10-16T11:35:38.533627038+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: 002dd47647bf51fbba023bd1762d807be478cf37de7a44b80cd01ac1f20bd94a
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.15.0.tgz
-    version: 1.15.0
-  - apiVersion: v2
-    appVersion: 1.14.0
-    created: "2025-10-16T11:35:38.52489216+02:00"
-    description: Postgres Operator creates and manages PostgreSQL clusters running
-      in Kubernetes
-    digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8
-    home: https://github.com/zalando/postgres-operator
-    keywords:
-    - postgres
-    - operator
-    - cloud-native
-    - patroni
-    - spilo
-    maintainers:
-    - email: opensource@zalando.de
-      name: Zalando
-    name: postgres-operator
-    sources:
-    - https://github.com/zalando/postgres-operator
-    urls:
-    - postgres-operator-1.14.0.tgz
-    version: 1.14.0
   - apiVersion: v2
     appVersion: 1.13.0
-    created: "2025-10-16T11:35:38.517347652+02:00"
+    created: "2024-08-21T18:54:43.160735116+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef
@@ -69,7 +25,7 @@ entries:
     version: 1.13.0
   - apiVersion: v2
     appVersion: 1.12.2
-    created: "2025-10-16T11:35:38.510819005+02:00"
+    created: "2024-08-21T18:54:43.152249286+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8
@@ -91,7 +47,7 @@ entries:
     version: 1.12.2
   - apiVersion: v2
     appVersion: 1.11.0
-    created: "2025-10-16T11:35:38.503781253+02:00"
+    created: "2024-08-21T18:54:43.145837894+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9
@@ -113,7 +69,7 @@ entries:
     version: 1.11.0
   - apiVersion: v2
     appVersion: 1.10.1
-    created: "2025-10-16T11:35:38.494366224+02:00"
+    created: "2024-08-21T18:54:43.139552116+02:00"
     description: Postgres Operator creates and manages PostgreSQL clusters running
       in Kubernetes
     digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c
@@ -133,4 +89,26 @@ entries:
     urls:
     - postgres-operator-1.10.1.tgz
     version: 1.10.1
-generated: "2025-10-16T11:35:38.487472753+02:00"
+  - apiVersion: v2
+    appVersion: 1.9.0
+    created: "2024-08-21T18:54:43.168490032+02:00"
+    description: Postgres Operator creates and manages PostgreSQL clusters running
+      in Kubernetes
+    digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276
+    home: https://github.com/zalando/postgres-operator
+    keywords:
+    - postgres
+    - operator
+    - cloud-native
+    - patroni
+    - spilo
+    maintainers:
+    - email: opensource@zalando.de
+      name: Zalando
+    name: postgres-operator
+    sources:
+    - https://github.com/zalando/postgres-operator
+    urls:
+    - postgres-operator-1.9.0.tgz
+    version: 1.9.0
+generated: "2024-08-21T18:54:43.126871802+02:00"

Binary file not shown.

@@ -140,8 +140,8 @@ rules:
   - delete
   - get
   - list
+{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }}
   - patch
-{{- if or (toString .Values.configKubernetes.storage_resize_mode | eq "pvc") (toString .Values.configKubernetes.storage_resize_mode | eq "mixed") }}
   - update
 {{- end }}
 # to read existing PVs. Creation should be done via dynamic provisioning
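Read as rendered output, master always grants `patch` on PVCs and gates `update` on resize mode `pvc` or `mixed`, while v1.13.0 gates both verbs on `pvc` alone. A sketch of the rule the master-side template would emit for `storage_resize_mode: "mixed"` (the values setting here is assumed):

```yaml
# Rendered persistentvolumeclaims rule (master template, resize mode "mixed");
# the v1.13.0 template would emit neither patch nor update in this case.
- apiGroups: [""]
  resources: ["persistentvolumeclaims"]
  verbs: ["delete", "get", "list", "patch", "update"]
```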

@@ -54,7 +54,7 @@ spec:
             value: {{ template "postgres-operator.controllerID" . }}
           {{- end }}
           {{- if .Values.extraEnvs }}
-{{ toYaml .Values.extraEnvs | indent 8 }}
+          {{- .Values.extraEnvs | toYaml | nindent 12 }}
           {{- end }}
           resources:
 {{ toYaml .Values.resources | indent 10 }}

@@ -1,7 +1,7 @@
 image:
   registry: ghcr.io
   repository: zalando/postgres-operator
-  tag: v1.15.0
+  tag: v1.13.0
   pullPolicy: "IfNotPresent"

 # Optionally specify an array of imagePullSecrets.
@@ -38,7 +38,7 @@ configGeneral:
   # etcd connection string for Patroni. Empty uses K8s-native DCS.
   etcd_host: ""
   # Spilo docker image
-  docker_image: ghcr.io/zalando/spilo-17:4.0-p3
+  docker_image: ghcr.io/zalando/spilo-16:3.3-p1

   # key name for annotation to ignore globally configured instance limits
   # ignore_instance_limits_annotation_key: ""
@@ -89,9 +89,9 @@ configMajorVersionUpgrade:
   #  - acid

   # minimal Postgres major version that will not automatically be upgraded
-  minimal_major_version: "13"
+  minimal_major_version: "12"
   # target Postgres major version when upgrading clusters automatically
-  target_major_version: "17"
+  target_major_version: "16"

 configKubernetes:
   # list of additional capabilities for postgres container
@@ -364,7 +364,7 @@ configLogicalBackup:
   # logical_backup_memory_request: ""

   # image for pods of the logical backup job (example runs pg_dumpall)
-  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0"
+  logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"

   # path of google cloud service account json file
   # logical_backup_google_application_credentials: ""

@@ -35,8 +35,6 @@ func init() {
 	flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- our outside of the Kubernetes cluster.")
 	flag.BoolVar(&config.NoDatabaseAccess, "nodatabaseaccess", false, "Disable all access to the database from the operator side.")
 	flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API")
-	flag.IntVar(&config.KubeQPS, "kubeqps", 10, "Kubernetes api requests per second.")
-	flag.IntVar(&config.KubeBurst, "kubeburst", 20, "Kubernetes api requests burst limit.")
 	flag.Parse()

 	config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true"
@@ -85,9 +83,6 @@ func main() {
 		log.Fatalf("couldn't get REST config: %v", err)
 	}

-	config.RestConfig.QPS = float32(config.KubeQPS)
-	config.RestConfig.Burst = config.KubeBurst
-
 	c := controller.NewController(&config, "")
 	c.Run(stop, wg)

@@ -1,4 +1,4 @@
-FROM golang:1.25-alpine
+FROM golang:1.22-alpine
 LABEL maintainer="Team ACID @ Zalando <team-acid@zalando.de>"

 # We need root certificates to deal with teams api over https

@@ -1,5 +1,5 @@
 ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest
-FROM golang:1.25-alpine AS builder
+FROM golang:1.22-alpine AS builder
 ARG VERSION=latest

 COPY . /go/src/github.com/zalando/postgres-operator

@@ -13,7 +13,7 @@ apt-get install -y wget
 (
   cd /tmp
-  wget -q "https://storage.googleapis.com/golang/go1.25.3.linux-${arch}.tar.gz" -O go.tar.gz
+  wget -q "https://storage.googleapis.com/golang/go1.22.5.linux-${arch}.tar.gz" -O go.tar.gz
   tar -xf go.tar.gz
   mv go /usr/local
   ln -s /usr/local/go/bin/go /usr/bin/go

@@ -63,17 +63,14 @@ the `PGVERSION` environment variable is set for the database pods. Since
 `v1.6.0` the related option `enable_pgversion_env_var` is enabled by default.

 In-place major version upgrades can be configured to be executed by the
-operator with the `major_version_upgrade_mode` option. By default, it is
-enabled (mode: `manual`). In any case, altering the version in the manifest
-will trigger a rolling update of pods to update the `PGVERSION` env variable.
-Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
-script will notice the version mismatch but start the current version again.
-Next, the operator would call an updage script inside Spilo. When automatic
-upgrades are disabled (mode: `off`) the upgrade could still be run by a user
-from within the primary pod. This gives you full control about the point in
-time when the upgrade can be started (check also maintenance windows below).
-Exec into the container and run:
+operator with the `major_version_upgrade_mode` option. By default it is set
+to `off` which means the cluster version will not change when increased in
+the manifest. Still, a rolling update would be triggered updating the
+`PGVERSION` variable. But Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py)
+script will notice the version mismatch and start the old version again.
+In this scenario the major version could then be run by a user from within the
+primary pod. Exec into the container and run:

 ```bash
 python3 /scripts/inplace_upgrade.py N
 ```
@@ -82,32 +79,11 @@ The upgrade is usually fast, well under one minute for most DBs. Note, that
 changes become irrevertible once `pg_upgrade` is called. To understand the
 upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488).

-When `major_version_upgrade_mode` is set to `full` the operator will compare
-the version in the manifest with the configured `minimal_major_version`. If it
-is lower the operator would start an automatic upgrade as described above. The
-configured `major_target_version` will be used as the new version. This option
-can be useful if you have to get rid of outdated major versions in your fleet.
-Please note, that the operator does not patch the version in the manifest.
-Thus, the `full` mode can create drift between desired and actual state.
-
-### Upgrade during maintenance windows
-
-When `maintenanceWindows` are defined in the Postgres manifest the operator
-will trigger a major version upgrade only during these periods. Make sure they
-are at least twice as long as your configured `resync_period` to guarantee
-that operator actions can be triggered.
-
-### Upgrade annotations
-
-When an upgrade is executed, the operator sets an annotation in the PostgreSQL
-resource, either `last-major-upgrade-success` if the upgrade succeeds, or
-`last-major-upgrade-failure` if it fails. The value of the annotation is a
-timestamp indicating when the upgrade occurred.
-
-If a PostgreSQL resource contains a failure annotation, the operator will not
-attempt to retry the upgrade during a sync event. To remove the failure
-annotation, you can revert the PostgreSQL version back to the current version.
-This action will trigger the removal of the failure annotation.
+When `major_version_upgrade_mode` is set to `manual` the operator will run
+the upgrade script for you after the manifest is updated and pods are rotated.
+It is also possible to define `maintenanceWindows` in the Postgres manifest to
+better control when such automated upgrades should take place after increasing
+the version.

 ## Non-default cluster domain
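Tying the two descriptions above back to the manifest, a minimal cluster sketch (cluster and team names are invented; the window format follows the cluster manifest reference further below):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-example-cluster   # hypothetical name
spec:
  teamId: acid
  numberOfInstances: 2
  volume:
    size: 1Gi
  postgresql:
    version: "16"              # raising this triggers the upgrade flow described above
  maintenanceWindows:          # optional: constrain when automated upgrades may run
  - "01:00-06:00"
  - "Sat:00:00-04:00"
```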
@@ -195,14 +171,12 @@ from numerous escape characters in the latter log entry, view it in CLI with
 used internally in K8s.

 The StatefulSet is replaced if the following properties change:
-
 - annotations
 - volumeClaimTemplates
 - template volumes

 The StatefulSet is replaced and a rolling updates is triggered if the following
 properties differ between the old and new state:
-
 - container name, ports, image, resources, env, envFrom, securityContext and volumeMounts
 - template labels, annotations, service account, securityContext, affinity, priority class and termination grace period
@@ -386,7 +360,7 @@ exceptions:
 The interval of days can be set with `password_rotation_interval` (default
 `90` = 90 days, minimum 1). On each rotation the user name and password values
 are replaced in the K8s secret. They belong to a newly created user named after
-the original role plus rotation date in YYMMDD format. All privileges are
+the original role plus rotation date in YYMMDD format. All priviliges are
 inherited meaning that migration scripts should still grant and revoke rights
 against the original role. The timestamp of the next rotation (in RFC 3339
 format, UTC timezone) is written to the secret as well. Note, if the rotation
@@ -566,7 +540,7 @@ manifest affinity.
 ```

 If `node_readiness_label_merge` is set to `"OR"` (default) the readiness label
-affinity will be appended with its own expressions block:
+affinty will be appended with its own expressions block:

 ```yaml
   affinity:
@@ -622,34 +596,22 @@ By default the topology key for the pod anti affinity is set to
 `kubernetes.io/hostname`, you can set another topology key e.g.
 `failure-domain.beta.kubernetes.io/zone`. See [built-in node labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels) for available topology keys.

-## Pod Disruption Budgets
+## Pod Disruption Budget

-By default the operator creates two PodDisruptionBudgets (PDB) to protect the cluster
-from voluntarily disruptions and hence unwanted DB downtime: so-called primary PDB and
-and PDB for critical operations.
-
-### Primary PDB
-The `MinAvailable` parameter of this PDB is set to `1` and, if `pdb_master_label_selector`
-is enabled, label selector includes `spilo-role=master` condition, which prevents killing
-masters in single-node clusters and/or the last remaining running instance in a multi-node
-cluster.
-
-## PDB for critical operations
-The `MinAvailable` parameter of this PDB is equal to the `numberOfInstances` set in the
-cluster manifest, while label selector includes `critical-operation=true` condition. This
-allows to protect all pods of a cluster, given they are labeled accordingly.
-For example, Operator labels all Spilo pods with `critical-operation=true` during the major
-version upgrade run. You may want to protect cluster pods during other critical operations
-by assigning the label to pods yourself or using other means of automation.
+By default the operator uses a PodDisruptionBudget (PDB) to protect the cluster
+from voluntarily disruptions and hence unwanted DB downtime. The `MinAvailable`
+parameter of the PDB is set to `1` which prevents killing masters in single-node
+clusters and/or the last remaining running instance in a multi-node cluster.

 The PDB is only relaxed in two scenarios:

 * If a cluster is scaled down to `0` instances (e.g. for draining nodes)
 * If the PDB is disabled in the configuration (`enable_pod_disruption_budget`)

-The PDBs are still in place having `MinAvailable` set to `0`. Disabling PDBs
-helps avoiding blocking Kubernetes upgrades in managed K8s environments at the
-cost of prolonged DB downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384)
+The PDB is still in place having `MinAvailable` set to `0`. If enabled it will
+be automatically set to `1` on scale up. Disabling PDBs helps avoiding blocking
+Kubernetes upgrades in managed K8s environments at the cost of prolonged DB
+downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384)
 for the use case.

 ## Add cluster-specific labels
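For orientation, a sketch of the primary PDB described on the master side; the object name and labels here are assumptions based on the operator's default label conventions, not a verbatim dump:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: postgres-acid-example-cluster-pdb   # illustrative name
spec:
  minAvailable: 1
  selector:
    matchLabels:
      cluster-name: acid-example-cluster
      spilo-role: master   # only included when pdb_master_label_selector is enabled
```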
@@ -900,7 +862,6 @@ services:
 There are multiple options to specify service annotations that will be merged
 with each other and override in the following order (where latter take
 precedence):
-
 1. Default annotations if LoadBalancer is enabled
 2. Globally configured `custom_service_annotations`
 3. `serviceAnnotations` specified in the cluster manifest
@@ -1143,7 +1104,7 @@ metadata:
     iam.gke.io/gcp-service-account: <GCP_SERVICE_ACCOUNT_NAME>@<GCP_PROJECT_ID>.iam.gserviceaccount.com
 ```

-2. Specify the new custom service account in your [operator parameters](./reference/operator_parameters.md)
+2. Specify the new custom service account in your [operator paramaters](./reference/operator_parameters.md)

 If using manual deployment or kustomize, this is done by setting
 `pod_service_account_name` in your configuration file specified in the
@@ -1312,7 +1273,7 @@ aws_or_gcp:

 If cluster members have to be (re)initialized restoring physical backups
 happens automatically either from the backup location or by running
-[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html)
+[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html)
 on one of the other running instances (preferably replicas if they do not lag
 behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster)
 clusters.
@@ -1420,10 +1381,6 @@ configuration:
         volumeMounts:
         - mountPath: /custom-pgdata-mountpoint
           name: pgdata
-        env:
-        - name: "ENV_VAR_NAME"
-          value: "any-k8s-env-things"
-        command: ['sh', '-c', 'echo "logging" > /opt/logs.txt']
       - ...
 ```

@@ -16,7 +16,7 @@ under the ~/go/src sub directories.

 Given the schema above, the Postgres Operator source code located at
 `github.com/zalando/postgres-operator` should be put at
-`~/go/src/github.com/zalando/postgres-operator`.
+-`~/go/src/github.com/zalando/postgres-operator`.

 ```bash
 export GOPATH=~/go
@@ -105,7 +105,6 @@ and K8s-like APIs for its custom resource definitions, namely the
 Postgres CRD and the operator CRD. The usage of the code generation follows
 conventions from the K8s community. Relevant scripts live in the `hack`
 directory:
-
 * `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`,
 * `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI).

@@ -113,7 +112,6 @@ The `/pkg/generated/` contains the resultant code. To make these scripts work,
 you may need to `export GOPATH=$(go env GOPATH)`

 References for code generation are:
-
 * [Relevant pull request](https://github.com/zalando/postgres-operator/pull/369)
   See comments there for minor issues that can sometimes broke the generation process.
 * [Code generator source code](https://github.com/kubernetes/code-generator)

@@ -188,7 +186,7 @@ go get -u github.com/derekparker/delve/cmd/dlv
 ```
 RUN apk --no-cache add go git musl-dev
-RUN go get github.com/derekparker/delve/cmd/dlv
+RUN go get -d github.com/derekparker/delve/cmd/dlv
 ```

 * Update the `Makefile` to build the project with debugging symbols. For that
@@ -317,7 +315,6 @@ precedence.
 Update the following Go files that obtain the configuration parameter from the
 manifest files:
-
 * [operator_configuration_type.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go)
 * [operator_config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/controller/operator_config.go)
 * [config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go)

@@ -326,7 +323,6 @@ Postgres manifest parameters are defined in the [api package](https://github.com
 The operator behavior has to be implemented at least in [k8sres.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres.go).
 Validation of CRD parameters is controlled in [crds.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/crds.go).
-
 Please, reflect your changes in tests, for example in:
 * [config_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config_test.go)
 * [k8sres_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres_test.go)
 * [util_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/util_test.go)

@@ -334,7 +330,6 @@ Please, reflect your changes in tests, for example in:
 ### Updating manifest files

 For the CRD-based configuration, please update the following files:
-
 * the default [OperatorConfiguration](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml)
 * the CRD's [validation](https://github.com/zalando/postgres-operator/blob/master/manifests/operatorconfiguration.crd.yaml)
 * the CRD's validation in the [Helm chart](https://github.com/zalando/postgres-operator/blob/master/charts/postgres-operator/crds/operatorconfigurations.yaml)

@@ -347,7 +342,6 @@ Last but no least, update the [ConfigMap](https://github.com/zalando/postgres-op
 Finally, add a section for each new configuration option and/or cluster manifest
 parameter in the reference documents:
-
 * [config reference](reference/operator_parameters.md)
 * [manifest reference](reference/cluster_manifest.md)

@@ -10,7 +10,7 @@ hence set it up first. For local tests we recommend to use one of the following
 solutions:

 * [minikube](https://github.com/kubernetes/minikube/releases), which creates a
-  K8s cluster inside a container or VM (requires Docker, KVM, Hyper-V, HyperKit, VirtualBox, or similar),
+  single-node K8s cluster inside a VM (requires KVM or VirtualBox),
 * [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allows creating multi-nodes K8s
   clusters running on Docker (requires Docker)

@@ -20,7 +20,7 @@ This quickstart assumes that you have started minikube or created a local kind
 cluster. Note that you can also use built-in K8s support in the Docker Desktop
 for Mac to follow the steps of this tutorial. You would have to replace
 `minikube start` and `minikube delete` with your launch actions for the Docker
-Desktop built-in K8s support.
+built-in K8s support.

 ## Configuration Options

@@ -230,7 +230,7 @@ kubectl delete postgresql acid-minimal-cluster
 ```

 This should remove the associated StatefulSet, database Pods, Services and
-Endpoints. The PersistentVolumes are released and the PodDisruptionBudgets are
+Endpoints. The PersistentVolumes are released and the PodDisruptionBudget is
 deleted. Secrets however are not deleted and backups will remain in place.

 When deleting a cluster while it is still starting up or got stuck during that

@@ -116,9 +116,9 @@ These parameters are grouped directly under the `spec` key in the manifest.
 * **maintenanceWindows**
   a list which defines specific time frames when certain maintenance operations
-  such as automatic major upgrades or master pod migration. Accepted formats
-  are "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for specific
-  days, with all times in UTC.
+  are allowed. So far, it is only implemented for automatic major version
+  upgrades. Accepted formats are "01:00-06:00" for daily maintenance windows or
+  "Sat:00:00-04:00" for specific days, with all times in UTC.

 * **users**
   a map of usernames to user flags for the users that should be created in the
@@ -247,7 +247,7 @@ These parameters are grouped directly under the `spec` key in the manifest.
   [kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource).
   It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets inside the StatefulSet.
   Also an `emptyDir` volume can be shared between initContainer and statefulSet.
-  Additionally, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example).
+  Additionaly, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example).
   Set `isSubPathExpr` to true if you want to include [API environment variables](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-expanded-environment).
   You can also specify in which container the additional Volumes will be mounted with the `targetContainers` array option.
   If `targetContainers` is empty, additional volumes will be mounted only in the `postgres` container.
@@ -257,7 +257,7 @@ These parameters are grouped directly under the `spec` key in the manifest.
 ## Prepared Databases

 The operator can create databases with default owner, reader and writer roles
-without the need to specify them under `users` or `databases` sections. Those
+without the need to specifiy them under `users` or `databases` sections. Those
 parameters are grouped under the `preparedDatabases` top-level key. For more
 information, see [user docs](../user.md#prepared-databases-with-roles-and-default-privileges).
@@ -638,7 +638,7 @@ the global configuration before adding the `tls` section'.
 ## Change data capture streams

 This sections enables change data capture (CDC) streams via Postgres'
-[logical decoding](https://www.postgresql.org/docs/17/logicaldecoding.html)
+[logical decoding](https://www.postgresql.org/docs/16/logicaldecoding.html)
 feature and `pgoutput` plugin. While the Postgres operator takes responsibility
 for providing the setup to publish change events, it relies on external tools
 to consume them. At Zalando, we are using a workflow based on
@@ -652,11 +652,11 @@ can have the following properties:
 * **applicationId**
   The application name to which the database and CDC belongs to. For each
-  set of streams with a distinct `applicationId` a separate stream resource as
-  well as a separate logical replication slot will be created. This means there
-  can be different streams in the same database and streams with the same
-  `applicationId` are bundled in one stream resource. The stream resource will
-  be called like the Postgres cluster plus "-<applicationId>" suffix. Required.
+  set of streams with a distinct `applicationId` a separate stream CR as well
+  as a separate logical replication slot will be created. This means there can
+  be different streams in the same database and streams with the same
+  `applicationId` are bundled in one stream CR. The stream CR will be called
+  like the Postgres cluster plus "-<applicationId>" suffix. Required.

 * **database**
   Name of the database from where events will be published via Postgres'
@ -667,37 +667,21 @@ can have the following properties:
* **tables** * **tables**
Defines a map of table names and their properties (`eventType`, `idColumn` Defines a map of table names and their properties (`eventType`, `idColumn`
and `payloadColumn`). Required. and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/).
The application is responsible for putting events into a (JSON/B or VARCHAR) The application is responsible for putting events into a (JSON/B or VARCHAR)
payload column of the outbox table in the structure of the specified target payload column of the outbox table in the structure of the specified target
event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/17/logical-replication-publication.html) event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/16/logical-replication-publication.html)
in Postgres for all tables specified for one `database` and `applicationId`. in Postgres for all tables specified for one `database` and `applicationId`.
The CDC operator will consume from it shortly after transactions are The CDC operator will consume from it shortly after transactions are
committed to the outbox table. The `idColumn` will be used in telemetry for committed to the outbox table. The `idColumn` will be used in telemetry for
the CDC operator. The names for `idColumn` and `payloadColumn` can be the CDC operator. The names for `idColumn` and `payloadColumn` can be
configured. Defaults are `id` and `payload`. The target `eventType` has to configured. Defaults are `id` and `payload`. The target `eventType` has to
be defined. One can also specify a `recoveryEventType` that will be used be defined. Required.
for a dead letter queue. By enabling `ignoreRecovery`, you can choose to
ignore failing events.
* **filter** * **filter**
Streamed events can be filtered by a jsonpath expression for each table. Streamed events can be filtered by a jsonpath expression for each table.
Optional. Optional.
* **enableRecovery**
Flag to enable a dead letter queue recovery for all streams tables.
Alternatively, recovery can also be enabled for single outbox tables by only
specifying a `recoveryEventType` and no `enableRecovery` flag. When set to
false or missing, events will be retried until they are successfully consumed. You can
use a `filter` expression to get rid of poison pills. Optional.
* **batchSize** * **batchSize**
Defines the size of batches in which events are consumed. Optional. Defines the size of batches in which events are consumed. Optional.
Defaults to 1. Defaults to 1.
* **cpu**
CPU requests to be set as an annotation on the stream resource. Optional.
* **memory**
Memory requests to be set as an annotation on the stream resource. Optional.
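Taken together, the stream properties above combine into a manifest section like the following minimal sketch (cluster, schema, table and event names are illustrative placeholders):

```yaml
apiVersion: "acid.zalan.do/v1"
kind: postgresql
metadata:
  name: acid-test-cluster      # illustrative cluster name
spec:
  streams:
    - applicationId: test-app  # one stream resource and one replication slot per applicationId
      database: foo            # events are published from this database
      batchSize: 100           # optional, defaults to 1
      cpu: "250m"              # optional, set as annotation on the stream resource
      memory: "500Mi"          # optional, set as annotation on the stream resource
      enableRecovery: true     # optional, dead letter queue recovery for all tables
      tables:
        data.outbox_table:                    # illustrative outbox table
          eventType: test-event               # required target event type
          idColumn: id                        # optional, defaults to "id"
          payloadColumn: payload              # optional, defaults to "payload"
          recoveryEventType: test-event-dlq   # event type used for the dead letter queue
      filter:
        data.outbox_table: "[?(@.source.txId > 500)]"   # optional jsonpath filter
```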

View File

@ -94,6 +94,9 @@ Those are top-level keys, containing both leaf keys and groups.
* **enable_pgversion_env_var** * **enable_pgversion_env_var**
With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`. With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`.
* **enable_spilo_wal_path_compat**
enables a backwards-compatible path between Spilo 12 and Spilo 13+ images. The default is `false`.
* **enable_team_id_clustername_prefix** * **enable_team_id_clustername_prefix**
To lower the risk of name clashes between clusters of different teams you To lower the risk of name clashes between clusters of different teams you
can turn on this flag and the operator will sync only clusters where the can turn on this flag and the operator will sync only clusters where the
@ -107,13 +110,8 @@ Those are top-level keys, containing both leaf keys and groups.
* **kubernetes_use_configmaps** * **kubernetes_use_configmaps**
Select if setup uses endpoints (default), or configmaps to manage leader when Select if setup uses endpoints (default), or configmaps to manage leader when
DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to
use endpoints option, and configmaps is required. Starting with K8s 1.33, use endpoints option, and configmaps is required. By default,
endpoints are marked as deprecated. It's recommended to switch to config maps `kubernetes_use_configmaps: false`, meaning endpoints will be used.
instead. But to do so, make sure you first scale the Postgres cluster down to
just one primary pod (e.g. using the `max_instances` option). Otherwise, you
risk running into a split-brain scenario.
By default, `kubernetes_use_configmaps: false`, meaning endpoints will be used.
Starting from v1.16.0 the default will be changed to `true`.
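For the `kubernetes_use_configmaps` option above, a minimal sketch of the switch in the CRD-based configuration (after scaling the cluster down to a single primary pod) could look like this:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-configuration
configuration:
  # top-level key; moves Patroni's DCS objects from endpoints to config maps
  kubernetes_use_configmaps: true
```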
* **docker_image** * **docker_image**
Spilo Docker image for Postgres instances. For production, don't rely on the Spilo Docker image for Postgres instances. For production, don't rely on the
@ -214,7 +212,7 @@ under the `users` key.
For all `LOGIN` roles that are not database owners the operator can rotate For all `LOGIN` roles that are not database owners the operator can rotate
credentials in the corresponding K8s secrets by replacing the username and credentials in the corresponding K8s secrets by replacing the username and
password. This means new users will be added on each rotation, inheriting password. This means new users will be added on each rotation, inheriting
all privileges from the original roles. The rotation date (in YYMMDD format) all priviliges from the original roles. The rotation date (in YYMMDD format)
is appended to the name of the new user. The timestamp of the next rotation is appended to the name of the new user. The timestamp of the next rotation
is written to the secret. The default is `false`. is written to the secret. The default is `false`.
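As a minimal sketch, enabling rotation in the CRD-based configuration (intervals are given in days; the values below are illustrative):

```yaml
configuration:
  users:
    enable_password_rotation: true
    password_rotation_interval: 90         # rotate credentials every 90 days
    password_rotation_user_retention: 180  # drop expired rotation users after 180 days
```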
@ -252,12 +250,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key.
* **minimal_major_version** * **minimal_major_version**
The minimal Postgres major version that will not automatically be upgraded The minimal Postgres major version that will not automatically be upgraded
when `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`. when `major_version_upgrade_mode` is set to `"full"`. The default is `"12"`.
* **target_major_version** * **target_major_version**
The target Postgres major version when upgrading clusters automatically The target Postgres major version when upgrading clusters automatically
that violate the configured `minimal_major_version` when that violate the configured `minimal_major_version` when
`major_version_upgrade_mode` is set to `"full"`. The default is `"17"`. `major_version_upgrade_mode` is set to `"full"`. The default is `"16"`.
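In the CRD-based configuration these keys are grouped as shown in this sketch, mirroring the new defaults above:

```yaml
configuration:
  major_version_upgrade:
    major_version_upgrade_mode: "full"   # auto-upgrade clusters below minimal_major_version
    minimal_major_version: "13"
    target_major_version: "17"
```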
## Kubernetes resources ## Kubernetes resources
@ -339,13 +337,13 @@ configuration they are grouped under the `kubernetes` key.
pod namespace). pod namespace).
* **pdb_name_format** * **pdb_name_format**
defines the template for primary PDB (Pod Disruption Budget) name created by the defines the template for PDB (Pod Disruption Budget) names created by the
operator. The default is `postgres-{cluster}-pdb`, where `{cluster}` is operator. The default is `postgres-{cluster}-pdb`, where `{cluster}` is
replaced by the cluster name. Only the `{cluster}` placeholder is allowed in replaced by the cluster name. Only the `{cluster}` placeholder is allowed in
the template. the template.
* **pdb_master_label_selector** * **pdb_master_label_selector**
By default the primary PDB will match the master role, preventing nodes from being By default the PDB will match the master role, preventing nodes from being
drained if the node_readiness_label is not used. If this option is set to drained if the node_readiness_label is not used. If this option is set to
`false` the `spilo-role=master` selector will not be added to the PDB. `false` the `spilo-role=master` selector will not be added to the PDB.
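A sketch of both PDB-related keys in the CRD-based configuration, using the defaults described above:

```yaml
configuration:
  kubernetes:
    pdb_name_format: "postgres-{cluster}-pdb"   # {cluster} is the only allowed placeholder
    pdb_master_label_selector: true             # keep the spilo-role=master selector
```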
@ -368,7 +366,7 @@ configuration they are grouped under the `kubernetes` key.
manifest. To keep secrets, set this option to `false`. The default is `true`. manifest. To keep secrets, set this option to `false`. The default is `true`.
* **enable_persistent_volume_claim_deletion** * **enable_persistent_volume_claim_deletion**
By default, the operator deletes persistent volume claims when removing the By default, the operator deletes PersistentVolumeClaims when removing the
Postgres cluster manifest, no matter if `persistent_volume_claim_retention_policy` Postgres cluster manifest, no matter if `persistent_volume_claim_retention_policy`
on the statefulset is set to `retain`. To keep PVCs set this option to `false`. on the statefulset is set to `retain`. To keep PVCs set this option to `false`.
The default is `true`. The default is `true`.
@ -557,7 +555,7 @@ configuration they are grouped under the `kubernetes` key.
pods with `InitialDelaySeconds: 6`, `PeriodSeconds: 10`, `TimeoutSeconds: 5`, pods with `InitialDelaySeconds: 6`, `PeriodSeconds: 10`, `TimeoutSeconds: 5`,
`SuccessThreshold: 1` and `FailureThreshold: 3`. When enabling readiness `SuccessThreshold: 1` and `FailureThreshold: 3`. When enabling readiness
probes it is recommended to switch the `pod_management_policy` to `parallel` probes it is recommended to switch the `pod_management_policy` to `parallel`
to avoid unnecessary waiting times in case of multiple instances failing. to avoid unneccesary waiting times in case of multiple instances failing.
The default is `false`. The default is `false`.
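For example, a sketch enabling readiness probes together with the recommended pod management policy in the CRD-based configuration:

```yaml
configuration:
  kubernetes:
    enable_readiness_probe: true
    pod_management_policy: "parallel"   # avoids serialized waits when several pods fail
```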
* **storage_resize_mode** * **storage_resize_mode**
@ -706,7 +704,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
replaced by the cluster name, `{namespace}` is replaced with the namespace replaced by the cluster name, `{namespace}` is replaced with the namespace
and `{hostedzone}` is replaced with the hosted zone (the value of the and `{hostedzone}` is replaced with the hosted zone (the value of the
`db_hosted_zone` parameter). The `{team}` placeholder can still be used, `db_hosted_zone` parameter). The `{team}` placeholder can still be used,
although it is not recommended because the team of a cluster can change. although it is not recommened because the team of a cluster can change.
If the cluster name starts with the `teamId` it will also be part of the If the cluster name starts with the `teamId` it will also be part of the
DNS, anyway. No other placeholders are allowed! DNS, anyway. No other placeholders are allowed!
@ -725,7 +723,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key.
is replaced by the cluster name, `{namespace}` is replaced with the is replaced by the cluster name, `{namespace}` is replaced with the
namespace and `{hostedzone}` is replaced with the hosted zone (the value of namespace and `{hostedzone}` is replaced with the hosted zone (the value of
the `db_hosted_zone` parameter). The `{team}` placeholder can still be used, the `db_hosted_zone` parameter). The `{team}` placeholder can still be used,
although it is not recommended because the team of a cluster can change. although it is not recommened because the team of a cluster can change.
If the cluster name starts with the `teamId` it will also be part of the If the cluster name starts with the `teamId` it will also be part of the
DNS, anyway. No other placeholders are allowed! DNS, anyway. No other placeholders are allowed!
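A sketch of both DNS templates in the CRD-based configuration (the hosted zone is an illustrative value):

```yaml
configuration:
  load_balancer:
    db_hosted_zone: "db.example.com"   # illustrative
    master_dns_name_format: "{cluster}.{namespace}.{hostedzone}"
    replica_dns_name_format: "{cluster}-repl.{namespace}.{hostedzone}"
```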

View File

@ -30,7 +30,7 @@ spec:
databases: databases:
foo: zalando foo: zalando
postgresql: postgresql:
version: "17" version: "16"
``` ```
Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator) Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator)
@ -109,7 +109,7 @@ metadata:
spec: spec:
[...] [...]
postgresql: postgresql:
version: "17" version: "16"
parameters: parameters:
password_encryption: scram-sha-256 password_encryption: scram-sha-256
``` ```
@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles:
The `<dbname>_owner` role is the database owner and should be used when creating The `<dbname>_owner` role is the database owner and should be used when creating
new database objects. All members of the `admin` role, e.g. teams API roles, can new database objects. All members of the `admin` role, e.g. teams API roles, can
become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/17/sql-alterdefaultprivileges.html) become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/16/sql-alterdefaultprivileges.html)
are configured for the owner role so that the `<dbname>_reader` role are configured for the owner role so that the `<dbname>_reader` role
automatically gets read-access (SELECT) to new tables and sequences and the automatically gets read-access (SELECT) to new tables and sequences and the
`<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables, `<dbname>_writer` receives write-access (INSERT, UPDATE, DELETE on tables,
@ -594,7 +594,7 @@ spec:
### Schema `search_path` for default roles ### Schema `search_path` for default roles
The schema [`search_path`](https://www.postgresql.org/docs/17/ddl-schemas.html#DDL-SCHEMAS-PATH) The schema [`search_path`](https://www.postgresql.org/docs/16/ddl-schemas.html#DDL-SCHEMAS-PATH)
for each role will include the role name and the schemas this role should have for each role will include the role name and the schemas this role should have
access to. So `foo_bar_writer` does not have to schema-qualify tables from access to. So `foo_bar_writer` does not have to schema-qualify tables from
schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and
@ -695,7 +695,7 @@ handle it.
### HugePages support ### HugePages support
The operator supports [HugePages](https://www.postgresql.org/docs/17/kernel-resources.html#LINUX-HUGEPAGES). The operator supports [HugePages](https://www.postgresql.org/docs/16/kernel-resources.html#LINUX-HUGEPAGES).
To enable HugePages, set the matching resource requests and/or limits in the manifest: To enable HugePages, set the matching resource requests and/or limits in the manifest:
```yaml ```yaml
@ -838,7 +838,7 @@ spec:
### Clone directly ### Clone directly
Another way to get a fresh copy of your source DB cluster is via Another way to get a fresh copy of your source DB cluster is via
[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html). To [pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html). To
use this feature simply leave out the timestamp field from the clone section. use this feature simply leave out the timestamp field from the clone section.
The operator will connect to the service of the source cluster by name. If the The operator will connect to the service of the source cluster by name. If the
cluster is called test, then the connection string will look like host=test cluster is called test, then the connection string will look like host=test
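For illustration, a minimal clone section without a timestamp, assuming a source cluster named `test`:

```yaml
spec:
  clone:
    cluster: "test"   # no timestamp field, so the operator clones via pg_basebackup
```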
@ -900,7 +900,7 @@ the PostgreSQL version between source and target cluster has to be the same.
To start a cluster as standby, add the following `standby` section in the YAML To start a cluster as standby, add the following `standby` section in the YAML
file. You can stream changes from archived WAL files (AWS S3 or Google Cloud file. You can stream changes from archived WAL files (AWS S3 or Google Cloud
Storage) or from a remote primary. Only one option can be specified in the Storage) or from a remote primary. Only one option can be specfied in the
manifest: manifest:
```yaml ```yaml
@ -911,7 +911,7 @@ spec:
For GCS, you have to define STANDBY_GOOGLE_APPLICATION_CREDENTIALS as a For GCS, you have to define STANDBY_GOOGLE_APPLICATION_CREDENTIALS as a
[custom pod environment variable](administrator.md#custom-pod-environment-variables). [custom pod environment variable](administrator.md#custom-pod-environment-variables).
It is not set from the config to allow for overriding. It is not set from the config to allow for overridding.
```yaml ```yaml
spec: spec:
@ -1005,7 +1005,6 @@ spec:
env: env:
- name: "ENV_VAR_NAME" - name: "ENV_VAR_NAME"
value: "any-k8s-env-things" value: "any-k8s-env-things"
command: ['sh', '-c', 'echo "logging" > /opt/logs.txt']
``` ```
In addition to any environment variables you specify, the following environment In addition to any environment variables you specify, the following environment
@ -1282,7 +1281,7 @@ minutes if the certificates have changed and reloads postgres accordingly.
### TLS certificates for connection pooler ### TLS certificates for connection pooler
By default, the pgBouncer image generates its own TLS certificate like Spilo. By default, the pgBouncer image generates its own TLS certificate like Spilo.
When the `tls` section is specified in the manifest it will be used for the When the `tls` section is specfied in the manifest it will be used for the
connection pooler pod(s) as well. The security context options are hard coded connection pooler pod(s) as well. The security context options are hard coded
to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same
like for Spilo. like for Spilo.
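As a sketch, a single `tls` section then covers both Spilo and the connection pooler pods (secret and file names are illustrative):

```yaml
spec:
  enableConnectionPooler: true
  tls:
    secretName: "pg-tls"   # secret containing tls.crt and tls.key
    caFile: "ca.crt"       # optional CA bundle within the secret
```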

View File

@ -15,7 +15,7 @@ RUN apt-get update \
curl \ curl \
vim \ vim \
&& pip3 install --no-cache-dir -r requirements.txt \ && pip3 install --no-cache-dir -r requirements.txt \
&& curl -LO https://dl.k8s.io/release/v1.32.9/bin/linux/amd64/kubectl \ && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \
&& chmod +x ./kubectl \ && chmod +x ./kubectl \
&& mv ./kubectl /usr/local/bin/kubectl \ && mv ./kubectl /usr/local/bin/kubectl \
&& apt-get clean \ && apt-get clean \

View File

@ -46,7 +46,7 @@ tools:
# install pinned version of 'kind' # install pinned version of 'kind'
# go install must run outside of a dir with a (module-based) Go project ! # go install must run outside of a dir with a (module-based) Go project !
# otherwise go install updates project's dependencies and/or behaves differently # otherwise go install updates project's dependencies and/or behaves differently
cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0 cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.23.0
e2etest: tools copy clean e2etest: tools copy clean
./run.sh main ./run.sh main

View File

@ -8,7 +8,7 @@ IFS=$'\n\t'
readonly cluster_name="postgres-operator-e2e-tests" readonly cluster_name="postgres-operator-e2e-tests"
readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" readonly kubeconfig_path="/tmp/kind-config-${cluster_name}"
readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" readonly spilo_image="registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4" readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4"
export GOPATH=${GOPATH-~/go} export GOPATH=${GOPATH-~/go}

View File

@ -12,9 +12,10 @@ from kubernetes import client
from tests.k8s_api import K8s from tests.k8s_api import K8s
from kubernetes.client.rest import ApiException from kubernetes.client.rest import ApiException
SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.1"
SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4" SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-16-e2e:0.2"
SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p3" SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-16:3.2-p3"
def to_selector(labels): def to_selector(labels):
return ",".join(["=".join(lbl) for lbl in labels.items()]) return ",".join(["=".join(lbl) for lbl in labels.items()])
@ -1184,33 +1185,27 @@ class EndToEndTestCase(unittest.TestCase):
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_major_version_upgrade(self): def test_major_version_upgrade(self):
""" """
Test major version upgrade: with full upgrade, maintenance window, and annotation Test major version upgrade
""" """
def check_version(): def check_version():
p = k8s.patroni_rest("acid-upgrade-test-0", "") or {} p = k8s.patroni_rest("acid-upgrade-test-0", "")
version = p.get("server_version", 0) // 10000 version = p.get("server_version", 0) // 10000
return version return version
def get_annotations():
pg_manifest = k8s.api.custom_objects_api.get_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test")
annotations = pg_manifest["metadata"]["annotations"]
return annotations
k8s = self.k8s k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-upgrade-test' cluster_label = 'application=spilo,cluster-name=acid-upgrade-test'
with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'r+') as f: with open("manifests/minimal-postgres-manifest-12.yaml", 'r+') as f:
upgrade_manifest = yaml.safe_load(f) upgrade_manifest = yaml.safe_load(f)
upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE
with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'w') as f: with open("manifests/minimal-postgres-manifest-12.yaml", 'w') as f:
yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper) yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper)
k8s.create_with_kubectl("manifests/minimal-postgres-lowest-version-manifest.yaml") k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml")
self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running") self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running")
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
self.eventuallyEqual(check_version, 13, "Version is not correct") self.eventuallyEqual(check_version, 12, "Version is not correct")
master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label) master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label)
# should upgrade immediately # should upgrade immediately
@ -1225,19 +1220,16 @@ class EndToEndTestCase(unittest.TestCase):
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# should have finish failover
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14") self.eventuallyEqual(check_version, 14, "Version should be upgraded from 12 to 14")
# check if annotation for last upgrade's success is set
annotations = get_annotations()
self.assertIsNotNone(annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not set")
# should not upgrade because current time is not in maintenanceWindow # should not upgrade because current time is not in maintenanceWindow
current_time = datetime.now() current_time = datetime.now()
maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}" maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}"
pg_patch_version_15_outside_mw = { pg_patch_version_15 = {
"spec": { "spec": {
"postgresql": { "postgresql": {
"version": "15" "version": "15"
@ -1248,23 +1240,21 @@ class EndToEndTestCase(unittest.TestCase):
} }
} }
k8s.api.custom_objects_api.patch_namespaced_custom_object( k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_outside_mw) "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# no pod replacement outside of the maintenance window # should have finish failover
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 14, "Version should not be upgraded") self.eventuallyEqual(check_version, 14, "Version should not be upgraded")
second_annotations = get_annotations() # change the version again to trigger operator sync
self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set")
# change maintenanceWindows to current
maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}"
pg_patch_version_15_in_mw = { pg_patch_version_16 = {
"spec": { "spec": {
"postgresql": { "postgresql": {
"version": "15" "version": "16"
}, },
"maintenanceWindows": [ "maintenanceWindows": [
maintenance_window_current maintenance_window_current
@ -1273,52 +1263,14 @@ class EndToEndTestCase(unittest.TestCase):
} }
k8s.api.custom_objects_api.patch_namespaced_custom_object( k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw) "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 15, "Version should be upgraded from 14 to 15")
# check if annotation for last upgrade's success is updated after second upgrade
third_annotations = get_annotations()
self.assertIsNotNone(third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not set")
self.assertNotEqual(annotations.get("last-major-upgrade-success"), third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not updated")
# test upgrade with failed upgrade annotation
pg_patch_version_17 = {
"metadata": {
"annotations": {
"last-major-upgrade-failure": "2024-01-02T15:04:05Z"
},
},
"spec": {
"postgresql": {
"version": "17"
},
},
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
# should have finish failover
k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set") self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16")
# change the version back to 15 and should remove failure annotation
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
k8s.wait_for_pod_start('spilo-role=master,' + cluster_label)
k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label)
self.eventuallyEqual(check_version, 15, "Version should not be upgraded from 15")
fourth_annotations = get_annotations()
self.assertIsNone(fourth_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure is not removed")
@timeout_decorator.timeout(TEST_TIMEOUT_SEC) @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
def test_persistent_volume_claim_retention_policy(self): def test_persistent_volume_claim_retention_policy(self):
@ -1752,13 +1704,9 @@ class EndToEndTestCase(unittest.TestCase):
Test password rotation and removal of users due to retention policy Test password rotation and removal of users due to retention policy
''' '''
k8s = self.k8s k8s = self.k8s
cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster'
leader = k8s.get_cluster_leader_pod() leader = k8s.get_cluster_leader_pod()
today = date.today() today = date.today()
# remember number of secrets to make sure it stays the same
secret_count = k8s.count_secrets_with_label(cluster_label)
# enable password rotation for owner of foo database # enable password rotation for owner of foo database
pg_patch_rotation_single_users = { pg_patch_rotation_single_users = {
"spec": { "spec": {
@ -1814,7 +1762,6 @@ class EndToEndTestCase(unittest.TestCase):
enable_password_rotation = { enable_password_rotation = {
"data": { "data": {
"enable_password_rotation": "true", "enable_password_rotation": "true",
"inherited_annotations": "environment",
"password_rotation_interval": "30", "password_rotation_interval": "30",
"password_rotation_user_retention": "30", # should be set to 60 "password_rotation_user_retention": "30", # should be set to 60
}, },
@ -1861,29 +1808,13 @@ class EndToEndTestCase(unittest.TestCase):
self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1, self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1,
"Could not connect to the database with rotation user {}".format(rotation_user), 10, 5) "Could not connect to the database with rotation user {}".format(rotation_user), 10, 5)
# add annotation which triggers syncSecrets call
pg_annotation_patch = {
"metadata": {
"annotations": {
"environment": "test",
}
}
}
k8s.api.custom_objects_api.patch_namespaced_custom_object(
"acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_annotation_patch)
self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
time.sleep(10)
self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), secret_count, "Unexpected number of secrets")
# check if rotation has been ignored for user from test_cross_namespace_secrets test # check if rotation has been ignored for user from test_cross_namespace_secrets test
db_user_secret = k8s.get_secret(username="test.db_user", namespace="test") db_user_secret = k8s.get_secret(username="test.db_user", namespace="test")
secret_username = str(base64.b64decode(db_user_secret.data["username"]), 'utf-8') secret_username = str(base64.b64decode(db_user_secret.data["username"]), 'utf-8')
self.assertEqual("test.db_user", secret_username, self.assertEqual("test.db_user", secret_username,
"Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username)) "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username))
# check if annotation for secret has been updated
self.assertTrue("environment" in db_user_secret.metadata.annotations, "Added annotation was not propagated to secret")
# disable password rotation for all other users (foo_user) # disable password rotation for all other users (foo_user)
# and pick smaller intervals to see if the third fake rotation user is dropped # and pick smaller intervals to see if the third fake rotation user is dropped
enable_password_rotation = { enable_password_rotation = {
@ -2121,7 +2052,7 @@ class EndToEndTestCase(unittest.TestCase):
patch_sset_propagate_annotations = { patch_sset_propagate_annotations = {
"data": { "data": {
"downscaler_annotations": "deployment-time,downscaler/*", "downscaler_annotations": "deployment-time,downscaler/*",
"inherited_annotations": "environment,owned-by", "inherited_annotations": "owned-by",
} }
} }
k8s.update_config(patch_sset_propagate_annotations) k8s.update_config(patch_sset_propagate_annotations)
@ -2224,8 +2155,6 @@ class EndToEndTestCase(unittest.TestCase):
{ {
"applicationId": "test-app", "applicationId": "test-app",
"batchSize": 100, "batchSize": 100,
"cpu": "100m",
"memory": "200Mi",
"database": "foo", "database": "foo",
"enableRecovery": True, "enableRecovery": True,
"tables": { "tables": {
@ -2247,7 +2176,7 @@ class EndToEndTestCase(unittest.TestCase):
"eventType": "test-event", "eventType": "test-event",
"idColumn": "id", "idColumn": "id",
"payloadColumn": "payload", "payloadColumn": "payload",
"ignoreRecovery": True "recoveryEventType": "test-event-dlq"
} }
} }
} }
@ -2568,10 +2497,7 @@ class EndToEndTestCase(unittest.TestCase):
self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed") self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed")
pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace) pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace)
self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "primary pod disruption budget owner reference check failed") self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption owner reference check failed")
pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-critical-op-pdb".format(cluster_name), cluster_namespace)
self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption budget for critical operations owner reference check failed")
pg_secret = k8s.api.core_v1.read_namespaced_secret("postgres.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace) pg_secret = k8s.api.core_v1.read_namespaced_secret("postgres.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace)
self.assertTrue(self.has_postgresql_owner_reference(pg_secret.metadata.owner_references, inverse), "postgres secret owner reference check failed") self.assertTrue(self.has_postgresql_owner_reference(pg_secret.metadata.owner_references, inverse), "postgres secret owner reference check failed")

80 go.mod
View File

@ -1,75 +1,71 @@
module github.com/zalando/postgres-operator module github.com/zalando/postgres-operator
go 1.25.3 go 1.22
require ( require (
github.com/Masterminds/semver v1.5.0 github.com/aws/aws-sdk-go v1.53.8
github.com/aws/aws-sdk-go v1.55.8
github.com/golang/mock v1.6.0 github.com/golang/mock v1.6.0
github.com/lib/pq v1.10.9 github.com/lib/pq v1.10.9
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
github.com/r3labs/diff v1.1.0 github.com/r3labs/diff v1.1.0
github.com/sirupsen/logrus v1.9.3 github.com/sirupsen/logrus v1.9.3
github.com/stretchr/testify v1.11.1 github.com/stretchr/testify v1.9.0
golang.org/x/crypto v0.43.0 golang.org/x/crypto v0.26.0
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3
gopkg.in/yaml.v2 v2.4.0 gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.32.9 k8s.io/api v0.28.12
k8s.io/apiextensions-apiserver v0.25.9 k8s.io/apiextensions-apiserver v0.25.9
k8s.io/apimachinery v0.32.9 k8s.io/apimachinery v0.28.12
k8s.io/client-go v0.32.9 k8s.io/client-go v0.28.12
k8s.io/code-generator v0.25.9 k8s.io/code-generator v0.25.9
) )
require ( require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/evanphx/json-patch v4.12.0+incompatible // indirect
github.com/go-logr/logr v1.4.2 // indirect github.com/go-logr/logr v1.2.4 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/swag v0.22.3 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.9 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.7.0 // indirect github.com/google/go-cmp v0.6.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/imdario/mergo v0.3.6 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/moby/spdystream v0.5.0 // indirect github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
github.com/spf13/pflag v1.0.5 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/x448/float16 v0.8.4 // indirect golang.org/x/mod v0.17.0 // indirect
golang.org/x/mod v0.28.0 // indirect golang.org/x/net v0.25.0 // indirect
golang.org/x/net v0.45.0 // indirect golang.org/x/oauth2 v0.8.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sync v0.8.0 // indirect
golang.org/x/sync v0.17.0 // indirect golang.org/x/sys v0.23.0 // indirect
golang.org/x/sys v0.37.0 // indirect golang.org/x/term v0.23.0 // indirect
golang.org/x/term v0.36.0 // indirect golang.org/x/text v0.17.0 // indirect
golang.org/x/text v0.30.0 // indirect golang.org/x/time v0.3.0 // indirect
golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect
golang.org/x/tools v0.37.0 // indirect google.golang.org/appengine v1.6.7 // indirect
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect google.golang.org/protobuf v1.33.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect
k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/randfill v1.0.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect
) )

183 go.sum
View File

@ -1,53 +1,52 @@
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k=
github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ=
github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ=
github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
@ -70,8 +69,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -81,21 +80,18 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M= github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M=
github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig= github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@ -109,40 +105,39 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.43.0 h1:dduJYIi3A3KOfdGOHX8AVZ/jGiyPa3IbBozJ5kNuE04= golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw=
golang.org/x/crypto v0.43.0/go.mod h1:BFbav4mRNlXJL4wNeejLpWxB7wMbc79PdRGhWKncxR0= golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.28.0 h1:gQBtGhjxykdjY9YhZpSlZIsbnaE2+PgjfLWUQTnoZ1U= golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
golang.org/x/mod v0.28.0/go.mod h1:yfB/L0NOf/kmEbXjzCPOx1iK1fRutOydrCMsqRhEBxI= golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.45.0 h1:RLBg5JKixCy82FtLJpeNlVM0nrSqpCRYzVU1n8kj0tM= golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.45.0/go.mod h1:ECOoLqd5U3Lhyeyo/QDCEVQ4sNgYsqvCZ722XogGieY= golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -150,41 +145,38 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.37.0 h1:fdNQudmxPjkdUTPnLn5mdQv7Zwvbvpaxqs831goi9kQ= golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM=
golang.org/x/sys v0.37.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.36.0 h1:zMPR+aF8gfksFprF/Nc/rd1wRS1EI6nDBGyWAvDzx2Q= golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU=
golang.org/x/term v0.36.0/go.mod h1:Qu394IJq6V6dCBRgwqshf3mPF85AqzYEzofzRdZkWss= golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.30.0 h1:yznKA/E9zq54KzlzBEAWn1NXSQ8DIp/NYMy88xJjl4k= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc=
golang.org/x/text v0.30.0/go.mod h1:yDdHFIX9t+tORqspjENWgzaCVXgk0yYnYuSZ8UzzBVM= golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.37.0 h1:DVSRzp7FwePZW356yEAChSdNcQo6Nsp+fex1SUW09lE= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg=
golang.org/x/tools v0.37.0/go.mod h1:MBN5QPQtLMHVdvsbtarmTNukZDdgwdwlO5qGacAzF0w= golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY=
golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM=
golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
@ -194,34 +186,29 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI= k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y= k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs= k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c= k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI= k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w= k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w=
k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI= k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08=
k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E=
k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk=
k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU=
k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc=
sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps=
sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@ -23,7 +23,6 @@ THE SOFTWARE.
package cmd package cmd
import ( import (
"context"
"log" "log"
"os" "os"
user "os/user" user "os/user"
@ -122,7 +121,7 @@ func connect(clusterName string, master bool, replica string, psql bool, user st
log.Fatal(err) log.Fatal(err)
} }
err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ err = exec.Stream(remotecommand.StreamOptions{
Stdin: os.Stdin, Stdin: os.Stdin,
Stdout: os.Stdout, Stdout: os.Stdout,
Stderr: os.Stderr, Stderr: os.Stderr,

View File

@ -65,7 +65,7 @@ func version(namespace string) {
operatorDeployment := getPostgresOperator(client) operatorDeployment := getPostgresOperator(client)
if operatorDeployment.Name == "" { if operatorDeployment.Name == "" {
log.Fatalf("make sure zalando's postgres operator is running in namespace %s", namespace) log.Fatal("make sure zalando's postgres operator is running")
} }
operatorImage := operatorDeployment.Spec.Template.Spec.Containers[0].Image operatorImage := operatorDeployment.Spec.Template.Spec.Containers[0].Image
imageDetails := strings.Split(operatorImage, ":") imageDetails := strings.Split(operatorImage, ":")
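
getPostgresOperator itself is not shown in this hunk. Assuming it looks up the operator Deployment with client-go (the deployment name "postgres-operator" is an assumption for illustration), the version detection above amounts to a lookup like this sketch:

package cmd

import (
	"context"
	"log"
	"strings"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// operatorVersion reports the image tag of the operator Deployment.
// The deployment name "postgres-operator" is hypothetical, not from the diff.
func operatorVersion(clientset *kubernetes.Clientset, namespace string) string {
	deployment, err := clientset.AppsV1().Deployments(namespace).Get(
		context.TODO(), "postgres-operator", metav1.GetOptions{})
	if err != nil || deployment.Name == "" {
		log.Fatalf("make sure zalando's postgres operator is running in namespace %s", namespace)
	}
	image := deployment.Spec.Template.Spec.Containers[0].Image
	// e.g. "ghcr.io/zalando/postgres-operator:v1.13.0" -> "v1.13.0"
	parts := strings.Split(image, ":")
	return parts[len(parts)-1]
}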

View File

@ -1,71 +1,73 @@
module github.com/zalando/postgres-operator/kubectl-pg module github.com/zalando/postgres-operator/kubectl-pg
go 1.25 go 1.22
require ( require (
github.com/spf13/cobra v1.10.1 github.com/spf13/cobra v1.8.1
github.com/spf13/viper v1.21.0 github.com/spf13/viper v1.19.0
github.com/zalando/postgres-operator v1.14.0 github.com/zalando/postgres-operator v1.12.2
k8s.io/api v0.32.9 k8s.io/api v0.28.12
k8s.io/apiextensions-apiserver v0.25.9 k8s.io/apiextensions-apiserver v0.25.9
k8s.io/apimachinery v0.32.9 k8s.io/apimachinery v0.28.12
k8s.io/client-go v0.32.9 k8s.io/client-go v0.28.12
) )
require ( require (
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/emicklei/go-restful/v3 v3.9.0 // indirect
github.com/fsnotify/fsnotify v1.9.0 // indirect github.com/fsnotify/fsnotify v1.7.0 // indirect
github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.1 // indirect
github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.19.6 // indirect
github.com/go-openapi/jsonpointer v0.21.0 // indirect
github.com/go-openapi/jsonreference v0.20.2 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect
github.com/go-openapi/swag v0.23.0 // indirect github.com/go-openapi/swag v0.22.3 // indirect
github.com/go-viper/mapstructure/v2 v2.4.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-cmp v0.5.9 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/google/uuid v1.6.0 // indirect github.com/google/uuid v1.4.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/hashicorp/hcl v1.0.0 // indirect
github.com/imdario/mergo v0.3.6 // indirect
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/kr/text v0.2.0 // indirect github.com/kr/text v0.2.0 // indirect
github.com/magiconair/properties v1.8.7 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/moby/spdystream v0.5.0 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect github.com/pelletier/go-toml/v2 v2.2.2 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect github.com/sagikazarmark/locafero v0.4.0 // indirect
github.com/pkg/errors v0.9.1 // indirect github.com/sagikazarmark/slog-shim v0.1.0 // indirect
github.com/sagikazarmark/locafero v0.11.0 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect github.com/sirupsen/logrus v1.9.3 // indirect
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect github.com/sourcegraph/conc v0.3.0 // indirect
github.com/spf13/afero v1.15.0 // indirect github.com/spf13/afero v1.11.0 // indirect
github.com/spf13/cast v1.10.0 // indirect github.com/spf13/cast v1.6.0 // indirect
github.com/spf13/pflag v1.0.10 // indirect github.com/spf13/pflag v1.0.5 // indirect
github.com/subosito/gotenv v1.6.0 // indirect github.com/subosito/gotenv v1.6.0 // indirect
github.com/x448/float16 v0.8.4 // indirect go.uber.org/multierr v1.11.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect golang.org/x/crypto v0.23.0 // indirect
golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect
golang.org/x/net v0.30.0 // indirect golang.org/x/net v0.23.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect
golang.org/x/sys v0.29.0 // indirect golang.org/x/sys v0.20.0 // indirect
golang.org/x/term v0.27.0 // indirect golang.org/x/term v0.20.0 // indirect
golang.org/x/text v0.28.0 // indirect golang.org/x/text v0.15.0 // indirect
golang.org/x/time v0.7.0 // indirect golang.org/x/time v0.5.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect google.golang.org/appengine v1.6.8 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect google.golang.org/protobuf v1.33.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect
k8s.io/klog/v2 v2.130.1 // indirect k8s.io/klog/v2 v2.100.1 // indirect
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f // indirect k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 // indirect sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
sigs.k8s.io/yaml v1.4.0 // indirect sigs.k8s.io/yaml v1.3.0 // indirect
) )
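
Because the go.mod hunk above pins github.com/zalando/postgres-operator to a fixed release, the version actually compiled into a kubectl-pg binary can be checked at runtime with the standard runtime/debug build info; a small self-contained sketch:

package main

import (
	"fmt"
	"runtime/debug"
)

// Prints the postgres-operator module version embedded in this binary,
// useful for confirming which release a given kubectl-pg build depends on.
func main() {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		fmt.Println("no build info available (binary not built from a module)")
		return
	}
	for _, dep := range info.Deps {
		if dep.Path == "github.com/zalando/postgres-operator" {
			fmt.Println(dep.Path, dep.Version)
		}
	}
}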

View File

@ -1,51 +1,51 @@
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8=
github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0=
github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA=
github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM=
github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE=
github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs=
github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ=
github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY=
github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE=
github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k=
github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g=
github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI=
github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8=
github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs=
github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec=
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28=
github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA=
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@ -61,10 +61,14 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@ -74,130 +78,151 @@ github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d h1:LznySqW8MqVeFh+p
github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I= github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d/go.mod h1:u3hJ0kqCQu/cPpsu3RbCOPZ0d7V3IjPjv1adNRleM9I=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA=
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.9.4/go.mod h1:gCQYp2Q+kSoIj7ykSVb9nskRSsR6PUj4AiLywzIhbKM=
github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE=
github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg=
github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM=
github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ=
github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4=
github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE=
github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo=
github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0=
github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8=
github.com/spf13/afero v1.15.0/go.mod h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY=
github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0=
github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo=
github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI=
github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg=
github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8=
github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/zalando/postgres-operator v1.14.0 h1:C8+n26C8v6fPB1SNW+Y8X6oQoEHufzGJXJzYPlix+zw= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zalando/postgres-operator v1.14.0/go.mod h1:ZTHY3sVfHgLLRpTgyR/44JcumbACeJBjztr3o1yHBdc= github.com/zalando/postgres-operator v1.12.2 h1:HJLrGSJLKYkvdpHIxlAKhXWTeRsgDQki2s9QOyApUX0=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= github.com/zalando/postgres-operator v1.12.2/go.mod h1:tKNY4pMjnr5BhuzGiGngf1SPJ7K1vVRCmMkfmV9KZoQ=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0=
go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ= golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk=
golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA= google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4=
gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M=
gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA=
gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI= k8s.io/api v0.28.12 h1:C2hpsaso18pqn0Dmkfnbv/YCctozTC3KGGuZ6bF7zhQ=
k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y= k8s.io/api v0.28.12/go.mod h1:qjswI+whxvf9LAKD4sEYHfy+WgHGWeH+H5sCRQMwZAQ=
k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8=
k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M=
k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs= k8s.io/apimachinery v0.28.12 h1:VepMEVOi9o7L/4wMAXJq+3BK9tqBIeerTB+HSOTKeo0=
k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= k8s.io/apimachinery v0.28.12/go.mod h1:zUG757HaKs6Dc3iGtKjzIpBfqTM4yiRsEe3/E7NX15o=
k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c= k8s.io/client-go v0.28.12 h1:li7iRPRQF3vDki6gTxT/kXWJvw3BkJSdjVPVhDTZQec=
k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI= k8s.io/client-go v0.28.12/go.mod h1:yEzH2Z+nEGlrnKyHJWcJsbOr5tGdIj04dj1TVQOg0wE=
k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg=
k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f h1:GA7//TjRY9yWGy1poLzYYJJ4JRdzg3+O6e8I+e+8T5Y= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ=
k8s.io/kube-openapi v0.0.0-20241105132330-32ad38e42d3f/go.mod h1:R/HEjbvWI0qdfb8viZUeVZm0X6IZnxAydC7YU42CMw4= k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk=
k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo=
sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2 h1:MdmvkGuXi/8io6ixD5wud3vOLwc1rj0aNqRlpuvjmwA= sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE=
sigs.k8s.io/structured-merge-diff/v4 v4.4.2/go.mod h1:N8f93tFZh9U6vpxwRArLiikrE5/2tiu1w1AGfACIGE4= sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E=
sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo=
sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8=

View File

@ -25,11 +25,11 @@ RUN apt-get update \
&& curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \
&& apt-get update \ && apt-get update \
&& apt-get install --no-install-recommends -y \ && apt-get install --no-install-recommends -y \
postgresql-client-17 \
postgresql-client-16 \ postgresql-client-16 \
postgresql-client-15 \ postgresql-client-15 \
postgresql-client-14 \ postgresql-client-14 \
postgresql-client-13 \ postgresql-client-13 \
postgresql-client-12 \
&& apt-get clean \ && apt-get clean \
&& rm -rf /var/lib/apt/lists/* && rm -rf /var/lib/apt/lists/*
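
The backup image installs a client package for every supported Postgres major because pg_dump should be run with the version matching the server being dumped. A minimal sketch of resolving the matching binary, assuming the Debian per-major layout and a PG_VERSION environment variable (both illustrative here, not a quote of the dump script):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// pgDumpPath resolves the pg_dump binary matching the server's major
// version, assuming the Debian postgresql-client layout.
func pgDumpPath(majorVersion string) (string, error) {
	p := filepath.Join("/usr/lib/postgresql", majorVersion, "bin", "pg_dump")
	if _, err := os.Stat(p); err != nil {
		return "", fmt.Errorf("no client for PostgreSQL %s: %w", majorVersion, err)
	}
	return p, nil
}

func main() {
	path, err := pgDumpPath(os.Getenv("PG_VERSION"))
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(path)
}
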

View File

@ -122,21 +122,7 @@ function aws_upload {
function gcs_upload { function gcs_upload {
PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz
#Set local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS to nothing or the gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP"
#value of the LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS env var. Needed
#because `set -o nounset` is globally set
local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS=${LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS:-}
GSUTIL_OPTIONS=("-o" "Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS")
#If GOOGLE_APPLICATION_CREDENTIALS is not set try to get
#creds from metadata
if [[ -z $LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS ]]
then
GSUTIL_OPTIONS[1]="GoogleCompute:service_account=default"
fi
gsutil "${GSUTIL_OPTIONS[@]}" cp - "$PATH_TO_BACKUP"
} }
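
The rewritten function defaults the credentials variable to an empty string so that `set -o nounset` cannot abort the script, then falls back to the instance's default service account when no key file is supplied. The same branching, sketched in Go for clarity (the two option strings mirror the script; the rest is illustrative):

package main

import (
	"fmt"
	"os"
)

// gsutilOptions mirrors the fallback in gcs_upload: pass an explicit
// service key file when provided, otherwise rely on the compute
// instance's default service account (metadata credentials).
func gsutilOptions() []string {
	creds := os.Getenv("LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS")
	if creds == "" {
		return []string{"-o", "GoogleCompute:service_account=default"}
	}
	return []string{"-o", "Credentials:gs_service_key_file=" + creds}
}

func main() {
	fmt.Println(gsutilOptions())
}
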
function upload { function upload {

View File

@ -10,7 +10,7 @@ metadata:
# "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured
# "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured
spec: spec:
dockerImage: ghcr.io/zalando/spilo-17:4.0-p3 dockerImage: ghcr.io/zalando/spilo-16:3.3-p1
teamId: "acid" teamId: "acid"
numberOfInstances: 2 numberOfInstances: 2
users: # Application/Robot users users: # Application/Robot users
@ -48,7 +48,7 @@ spec:
defaultRoles: true defaultRoles: true
defaultUsers: false defaultUsers: false
postgresql: postgresql:
version: "17" version: "16"
parameters: # Expert section parameters: # Expert section
shared_buffers: "32MB" shared_buffers: "32MB"
max_connections: "10" max_connections: "10"

View File

@ -34,7 +34,7 @@ data:
default_memory_request: 100Mi default_memory_request: 100Mi
# delete_annotation_date_key: delete-date # delete_annotation_date_key: delete-date
# delete_annotation_name_key: delete-clustername # delete_annotation_name_key: delete-clustername
docker_image: ghcr.io/zalando/spilo-17:4.0-p3 docker_image: ghcr.io/zalando/spilo-16:3.3-p1
# downscaler_annotations: "deployment-time,downscaler/*" # downscaler_annotations: "deployment-time,downscaler/*"
enable_admin_role_for_users: "true" enable_admin_role_for_users: "true"
enable_crd_registration: "true" enable_crd_registration: "true"
@ -86,7 +86,7 @@ data:
# logical_backup_cpu_limit: "" # logical_backup_cpu_limit: ""
# logical_backup_cpu_request: "" # logical_backup_cpu_request: ""
logical_backup_cronjob_environment_secret: "" logical_backup_cronjob_environment_secret: ""
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0" logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
# logical_backup_google_application_credentials: "" # logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-" logical_backup_job_prefix: "logical-backup-"
# logical_backup_memory_limit: "" # logical_backup_memory_limit: ""
@ -112,7 +112,7 @@ data:
min_cpu_limit: 250m min_cpu_limit: 250m
min_instances: "-1" min_instances: "-1"
min_memory_limit: 250Mi min_memory_limit: 250Mi
minimal_major_version: "13" minimal_major_version: "12"
# node_readiness_label: "status:ready" # node_readiness_label: "status:ready"
# node_readiness_label_merge: "OR" # node_readiness_label_merge: "OR"
oauth_token_secret_name: postgresql-operator oauth_token_secret_name: postgresql-operator
@ -162,7 +162,7 @@ data:
spilo_privileged: "false" spilo_privileged: "false"
storage_resize_mode: "pvc" storage_resize_mode: "pvc"
super_username: postgres super_username: postgres
target_major_version: "17" target_major_version: "16"
team_admin_role: "admin" team_admin_role: "admin"
team_api_role_configuration: "log_statement:all" team_api_role_configuration: "log_statement:all"
teams_api_url: http://fake-teams-api.default.svc.cluster.local teams_api_url: http://fake-teams-api.default.svc.cluster.local

View File

@ -31,21 +31,11 @@ spec:
version: "13" version: "13"
sidecars: sidecars:
- name: "exporter" - name: "exporter"
image: "quay.io/prometheuscommunity/postgres-exporter:v0.15.0" image: "wrouesnel/postgres_exporter"
ports: ports:
- name: exporter - name: exporter
containerPort: 9187 containerPort: 9187
protocol: TCP protocol: TCP
env:
- name: DATA_SOURCE_URI
value: ":5432/?sslmode=disable"
- name: DATA_SOURCE_USER
value: "postgres"
- name: DATA_SOURCE_PASS
valueFrom:
secretKeyRef:
name: postgres.test-pg.credentials.postgresql.acid.zalan.do
key: password
resources: resources:
limits: limits:
cpu: 500m cpu: 500m
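
Beyond swapping the unmaintained wrouesnel image for the community exporter, the new manifest wires the sidecar to the cluster's credentials secret through DATA_SOURCE_* variables. Built with the Kubernetes API, that wiring looks roughly like this (the secret name follows the operator's <user>.<cluster>.credentials.<suffix> convention from the manifest above; treat the snippet as a sketch):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// exporterEnv builds the DATA_SOURCE_* variables for a postgres_exporter
// sidecar, pulling the password from the cluster's credentials secret.
func exporterEnv(secretName string) []v1.EnvVar {
	return []v1.EnvVar{
		{Name: "DATA_SOURCE_URI", Value: ":5432/?sslmode=disable"},
		{Name: "DATA_SOURCE_USER", Value: "postgres"},
		{Name: "DATA_SOURCE_PASS", ValueFrom: &v1.EnvVarSource{
			SecretKeyRef: &v1.SecretKeySelector{
				LocalObjectReference: v1.LocalObjectReference{Name: secretName},
				Key:                  "password",
			},
		}},
	}
}

func main() {
	fmt.Println(exporterEnv("postgres.test-pg.credentials.postgresql.acid.zalan.do"))
}
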

View File

@ -17,4 +17,4 @@ spec:
preparedDatabases: preparedDatabases:
bar: {} bar: {}
postgresql: postgresql:
version: "13" version: "12"

View File

@ -17,4 +17,4 @@ spec:
preparedDatabases: preparedDatabases:
bar: {} bar: {}
postgresql: postgresql:
version: "17" version: "16"

View File

@ -59,20 +59,13 @@ rules:
- get - get
- patch - patch
- update - update
# to read configuration from ConfigMaps and help Patroni manage the cluster if endpoints are not used # to read configuration from ConfigMaps
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
- configmaps - configmaps
verbs: verbs:
- create
- delete
- deletecollection
- get - get
- list
- patch
- update
- watch
# to send events to the CRs # to send events to the CRs
- apiGroups: - apiGroups:
- "" - ""
@ -85,7 +78,7 @@ rules:
- patch - patch
- update - update
- watch - watch
# to manage endpoints which are also used by Patroni (if it is using config maps) # to manage endpoints which are also used by Patroni
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
@ -256,21 +249,7 @@ kind: ClusterRole
metadata: metadata:
name: postgres-pod name: postgres-pod
rules: rules:
# Patroni needs to watch and manage config maps (or endpoints) # Patroni needs to watch and manage endpoints
- apiGroups:
- ""
resources:
- configmaps
verbs:
- create
- delete
- deletecollection
- get
- list
- patch
- update
- watch
# Patroni needs to watch and manage endpoints (or config maps)
- apiGroups: - apiGroups:
- "" - ""
resources: resources:
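
The widened rules reflect that Patroni can keep its leader lock in either ConfigMaps or Endpoints, and whichever object type backs the DCS needs the full verb set. The ConfigMap rule from above, expressed as an rbacv1 literal (a sketch equivalent to the YAML, not operator code):

package main

import (
	"fmt"

	rbacv1 "k8s.io/api/rbac/v1"
)

func main() {
	// Verbs Patroni needs on the DCS object type (ConfigMaps here);
	// the same set applies to Endpoints when endpoints are used.
	rule := rbacv1.PolicyRule{
		APIGroups: []string{""},
		Resources: []string{"configmaps"},
		Verbs: []string{"create", "delete", "deletecollection", "get",
			"list", "patch", "update", "watch"},
	}
	fmt.Println(rule)
}
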

View File

@ -66,7 +66,7 @@ spec:
type: string type: string
docker_image: docker_image:
type: string type: string
default: "ghcr.io/zalando/spilo-17:4.0-p3" default: "ghcr.io/zalando/spilo-16:3.3-p1"
enable_crd_registration: enable_crd_registration:
type: boolean type: boolean
default: true default: true
@ -165,10 +165,10 @@ spec:
type: string type: string
minimal_major_version: minimal_major_version:
type: string type: string
default: "13" default: "12"
target_major_version: target_major_version:
type: string type: string
default: "17" default: "16"
kubernetes: kubernetes:
type: object type: object
properties: properties:
@ -374,28 +374,28 @@ spec:
properties: properties:
default_cpu_limit: default_cpu_limit:
type: string type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_cpu_request: default_cpu_request:
type: string type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
default_memory_limit: default_memory_limit:
type: string type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
default_memory_request: default_memory_request:
type: string type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
max_cpu_request: max_cpu_request:
type: string type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
max_memory_request: max_memory_request:
type: string type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
min_cpu_limit: min_cpu_limit:
type: string type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$|^$' pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
min_memory_limit: min_memory_limit:
type: string type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$|^$' pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
timeouts: timeouts:
type: object type: object
properties: properties:
@ -508,7 +508,7 @@ spec:
pattern: '^(\d+m|\d+(\.\d{1,3})?)$' pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
logical_backup_docker_image: logical_backup_docker_image:
type: string type: string
default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0" default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
logical_backup_google_application_credentials: logical_backup_google_application_credentials:
type: string type: string
logical_backup_job_prefix: logical_backup_job_prefix:

View File

@ -19,7 +19,7 @@ spec:
serviceAccountName: postgres-operator serviceAccountName: postgres-operator
containers: containers:
- name: postgres-operator - name: postgres-operator
image: ghcr.io/zalando/postgres-operator:v1.15.0 image: ghcr.io/zalando/postgres-operator:v1.13.0
imagePullPolicy: IfNotPresent imagePullPolicy: IfNotPresent
resources: resources:
requests: requests:

View File

@ -3,7 +3,7 @@ kind: OperatorConfiguration
metadata: metadata:
name: postgresql-operator-default-configuration name: postgresql-operator-default-configuration
configuration: configuration:
docker_image: ghcr.io/zalando/spilo-17:4.0-p3 docker_image: ghcr.io/zalando/spilo-16:3.3-p1
# enable_crd_registration: true # enable_crd_registration: true
# crd_categories: # crd_categories:
# - all # - all
@ -39,8 +39,8 @@ configuration:
major_version_upgrade_mode: "manual" major_version_upgrade_mode: "manual"
# major_version_upgrade_team_allow_list: # major_version_upgrade_team_allow_list:
# - acid # - acid
minimal_major_version: "13" minimal_major_version: "12"
target_major_version: "17" target_major_version: "16"
kubernetes: kubernetes:
# additional_pod_capabilities: # additional_pod_capabilities:
# - "SYS_NICE" # - "SYS_NICE"
@ -168,7 +168,7 @@ configuration:
# logical_backup_cpu_request: "" # logical_backup_cpu_request: ""
# logical_backup_memory_limit: "" # logical_backup_memory_limit: ""
# logical_backup_memory_request: "" # logical_backup_memory_request: ""
logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0" logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"
# logical_backup_google_application_credentials: "" # logical_backup_google_application_credentials: ""
logical_backup_job_prefix: "logical-backup-" logical_backup_job_prefix: "logical-backup-"
logical_backup_provider: "s3" logical_backup_provider: "s3"

View File

@ -373,11 +373,11 @@ spec:
version: version:
type: string type: string
enum: enum:
- "12"
- "13" - "13"
- "14" - "14"
- "15" - "15"
- "16" - "16"
- "17"
parameters: parameters:
type: object type: object
additionalProperties: additionalProperties:
@ -512,9 +512,6 @@ spec:
type: string type: string
batchSize: batchSize:
type: integer type: integer
cpu:
type: string
pattern: '^(\d+m|\d+(\.\d{1,3})?)$'
database: database:
type: string type: string
enableRecovery: enableRecovery:
@ -523,9 +520,6 @@ spec:
type: object type: object
additionalProperties: additionalProperties:
type: string type: string
memory:
type: string
pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$'
tables: tables:
type: object type: object
additionalProperties: additionalProperties:
@ -537,8 +531,6 @@ spec:
type: string type: string
idColumn: idColumn:
type: string type: string
ignoreRecovery:
type: boolean
payloadColumn: payloadColumn:
type: string type: string
recoveryEventType: recoveryEventType:

View File

@ -8,7 +8,7 @@ spec:
size: 1Gi size: 1Gi
numberOfInstances: 1 numberOfInstances: 1
postgresql: postgresql:
version: "17" version: "16"
# Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming. # Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming.
standby: standby:
# s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/" # s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/"

View File

@ -595,6 +595,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
"version": { "version": {
Type: "string", Type: "string",
Enum: []apiextv1.JSON{ Enum: []apiextv1.JSON{
{
Raw: []byte(`"12"`),
},
{ {
Raw: []byte(`"13"`), Raw: []byte(`"13"`),
}, },
@ -607,9 +610,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{
{ {
Raw: []byte(`"16"`), Raw: []byte(`"16"`),
}, },
{
Raw: []byte(`"17"`),
},
}, },
}, },
"parameters": { "parameters": {
@ -1165,7 +1165,6 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
}, },
"enable_spilo_wal_path_compat": { "enable_spilo_wal_path_compat": {
Type: "boolean", Type: "boolean",
Description: "deprecated",
}, },
"enable_team_id_clustername_prefix": { "enable_team_id_clustername_prefix": {
Type: "boolean", Type: "boolean",
@ -1574,35 +1573,35 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{
Properties: map[string]apiextv1.JSONSchemaProps{ Properties: map[string]apiextv1.JSONSchemaProps{
"default_cpu_limit": { "default_cpu_limit": {
Type: "string", Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
}, },
"default_cpu_request": { "default_cpu_request": {
Type: "string", Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
}, },
"default_memory_limit": { "default_memory_limit": {
Type: "string", Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
}, },
"default_memory_request": { "default_memory_request": {
Type: "string", Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
}, },
"max_cpu_request": { "max_cpu_request": {
Type: "string", Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
}, },
"max_memory_request": { "max_memory_request": {
Type: "string", Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
}, },
"min_cpu_limit": { "min_cpu_limit": {
Type: "string", Type: "string",
Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$|^$", Pattern: "^(\\d+m|\\d+(\\.\\d{1,3})?)$",
}, },
"min_memory_limit": { "min_memory_limit": {
Type: "string", Type: "string",
Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$|^$", Pattern: "^(\\d+(e\\d+)?|\\d+(\\.\\d+)?(e\\d+)?[EPTGMK]i?)$",
}, },
}, },
}, },

View File

@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct {
type MajorVersionUpgradeConfiguration struct { type MajorVersionUpgradeConfiguration struct {
MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade
MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"` MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"`
MinimalMajorVersion string `json:"minimal_major_version" default:"13"` MinimalMajorVersion string `json:"minimal_major_version" default:"12"`
TargetMajorVersion string `json:"target_major_version" default:"17"` TargetMajorVersion string `json:"target_major_version" default:"16"`
} }
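
Under these settings, "manual" mode upgrades only when the manifest requests a newer version, while "full" additionally lifts clusters below minimal_major_version to target_major_version. A simplified sketch of how the three inputs could combine (illustrative only, not the operator's actual decision code; numeric comparison of majors is assumed, which holds from version 10 on):

package main

import (
	"fmt"
	"strconv"
)

// targetVersion sketches how upgrade mode, minimal and target majors
// might combine; simplified, not the operator's implementation.
func targetVersion(mode, current, desired, minimal, target string) string {
	cur, _ := strconv.Atoi(current)
	des, _ := strconv.Atoi(desired)
	low, _ := strconv.Atoi(minimal)
	switch {
	case mode == "off":
		return current
	case des > cur: // manifest triggers the upgrade
		return desired
	case mode == "full" && cur < low: // minimal version violation
		return target
	}
	return current
}

func main() {
	fmt.Println(targetVersion("full", "12", "12", "13", "17")) // -> "17"
}
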
// KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself

View File

@ -220,7 +220,6 @@ type Sidecar struct {
DockerImage string `json:"image,omitempty"` DockerImage string `json:"image,omitempty"`
Ports []v1.ContainerPort `json:"ports,omitempty"` Ports []v1.ContainerPort `json:"ports,omitempty"`
Env []v1.EnvVar `json:"env,omitempty"` Env []v1.EnvVar `json:"env,omitempty"`
Command []string `json:"command,omitempty"`
} }
// UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users // UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users
@ -259,8 +258,6 @@ type Stream struct {
Tables map[string]StreamTable `json:"tables"` Tables map[string]StreamTable `json:"tables"`
Filter map[string]*string `json:"filter,omitempty"` Filter map[string]*string `json:"filter,omitempty"`
BatchSize *uint32 `json:"batchSize,omitempty"` BatchSize *uint32 `json:"batchSize,omitempty"`
CPU *string `json:"cpu,omitempty"`
Memory *string `json:"memory,omitempty"`
EnableRecovery *bool `json:"enableRecovery,omitempty"` EnableRecovery *bool `json:"enableRecovery,omitempty"`
} }
@ -268,7 +265,6 @@ type Stream struct {
type StreamTable struct { type StreamTable struct {
EventType string `json:"eventType"` EventType string `json:"eventType"`
RecoveryEventType string `json:"recoveryEventType,omitempty"` RecoveryEventType string `json:"recoveryEventType,omitempty"`
IgnoreRecovery *bool `json:"ignoreRecovery,omitempty"`
IdColumn *string `json:"idColumn,omitempty"` IdColumn *string `json:"idColumn,omitempty"`
PayloadColumn *string `json:"payloadColumn,omitempty"` PayloadColumn *string `json:"payloadColumn,omitempty"`
} }
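
The added CPU/Memory pointers let a stream request resources for its event-streaming workload, and IgnoreRecovery suppresses recovery events per table. A toy example of how the new fields compose, using local stand-ins for the acidv1 types (values are invented for illustration):

package main

import "fmt"

// Minimal stand-ins for the acidv1 types above, just to show how the
// new fields fit together; operator code uses the real acidv1 package.
type StreamTable struct {
	EventType      string
	IgnoreRecovery *bool
}

type Stream struct {
	ApplicationId string
	Database      string
	Tables        map[string]StreamTable
	CPU, Memory   *string
}

func strPtr(s string) *string { return &s }
func boolPtr(b bool) *bool    { return &b }

func main() {
	s := Stream{
		ApplicationId: "test-app",
		Database:      "foo",
		CPU:           strPtr("250m"),
		Memory:        strPtr("500Mi"),
		Tables: map[string]StreamTable{
			"data.events": {EventType: "event", IgnoreRecovery: boolPtr(true)},
		},
	}
	fmt.Printf("%+v\n", s)
}
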

View File

@ -219,7 +219,7 @@ var unmarshalCluster = []struct {
"127.0.0.1/32" "127.0.0.1/32"
], ],
"postgresql": { "postgresql": {
"version": "17", "version": "16",
"parameters": { "parameters": {
"shared_buffers": "32MB", "shared_buffers": "32MB",
"max_connections": "10", "max_connections": "10",
@ -279,7 +279,7 @@ var unmarshalCluster = []struct {
}, },
Spec: PostgresSpec{ Spec: PostgresSpec{
PostgresqlParam: PostgresqlParam{ PostgresqlParam: PostgresqlParam{
PgVersion: "17", PgVersion: "16",
Parameters: map[string]string{ Parameters: map[string]string{
"shared_buffers": "32MB", "shared_buffers": "32MB",
"max_connections": "10", "max_connections": "10",
@ -339,7 +339,7 @@ var unmarshalCluster = []struct {
}, },
Error: "", Error: "",
}, },
marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"17","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"16","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`),
err: nil}, err: nil},
{ {
about: "example with clone", about: "example with clone",
@ -404,7 +404,7 @@ var postgresqlList = []struct {
out PostgresqlList out PostgresqlList
err error err error
}{ }{
{"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"17"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), {"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"16"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`),
PostgresqlList{ PostgresqlList{
TypeMeta: metav1.TypeMeta{ TypeMeta: metav1.TypeMeta{
Kind: "List", Kind: "List",
@ -425,7 +425,7 @@ var postgresqlList = []struct {
}, },
Spec: PostgresSpec{ Spec: PostgresSpec{
ClusterName: "testcluster42", ClusterName: "testcluster42",
PostgresqlParam: PostgresqlParam{PgVersion: "17"}, PostgresqlParam: PostgresqlParam{PgVersion: "16"},
Volume: Volume{Size: "10Gi"}, Volume: Volume{Size: "10Gi"},
TeamID: "acid", TeamID: "acid",
AllowedSourceRanges: []string{"185.85.220.0/22"}, AllowedSourceRanges: []string{"185.85.220.0/22"},

View File

@ -2,7 +2,7 @@
// +build !ignore_autogenerated // +build !ignore_autogenerated
/* /*
Copyright 2025 Compose, Zalando SE Copyright 2024 Compose, Zalando SE
Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
@ -1277,11 +1277,6 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) {
(*in)[i].DeepCopyInto(&(*out)[i]) (*in)[i].DeepCopyInto(&(*out)[i])
} }
} }
if in.Command != nil {
in, out := &in.Command, &out.Command
*out = make([]string, len(*in))
copy(*out, *in)
}
return return
} }
@ -1341,16 +1336,6 @@ func (in *Stream) DeepCopyInto(out *Stream) {
*out = new(uint32) *out = new(uint32)
**out = **in **out = **in
} }
if in.CPU != nil {
in, out := &in.CPU, &out.CPU
*out = new(string)
**out = **in
}
if in.Memory != nil {
in, out := &in.Memory, &out.Memory
*out = new(string)
**out = **in
}
if in.EnableRecovery != nil { if in.EnableRecovery != nil {
in, out := &in.EnableRecovery, &out.EnableRecovery in, out := &in.EnableRecovery, &out.EnableRecovery
*out = new(bool) *out = new(bool)
@ -1372,11 +1357,6 @@ func (in *Stream) DeepCopy() *Stream {
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *StreamTable) DeepCopyInto(out *StreamTable) { func (in *StreamTable) DeepCopyInto(out *StreamTable) {
*out = *in *out = *in
if in.IgnoreRecovery != nil {
in, out := &in.IgnoreRecovery, &out.IgnoreRecovery
*out = new(bool)
**out = **in
}
if in.IdColumn != nil { if in.IdColumn != nil {
in, out := &in.IdColumn, &out.IdColumn in, out := &in.IdColumn, &out.IdColumn
*out = new(string) *out = new(string)
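
The removed blocks follow the standard idiom of generated deepcopy code for optional pointer fields: allocate a fresh pointer and copy the pointed-to value, so the copy never aliases the source. The idiom in isolation:

package main

import "fmt"

type StreamTable struct {
	IgnoreRecovery *bool
}

// DeepCopyInto shows the generated idiom: new pointer, copied value,
// so mutating the copy cannot touch the source object.
func (in *StreamTable) DeepCopyInto(out *StreamTable) {
	*out = *in
	if in.IgnoreRecovery != nil {
		in, out := &in.IgnoreRecovery, &out.IgnoreRecovery
		*out = new(bool)
		**out = **in
	}
}

func main() {
	b := true
	src := StreamTable{IgnoreRecovery: &b}
	var dst StreamTable
	src.DeepCopyInto(&dst)
	*dst.IgnoreRecovery = false
	fmt.Println(*src.IgnoreRecovery, *dst.IgnoreRecovery) // true false
}
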

View File

@ -65,12 +65,11 @@ type kubeResources struct {
PatroniConfigMaps map[string]*v1.ConfigMap PatroniConfigMaps map[string]*v1.ConfigMap
Secrets map[types.UID]*v1.Secret Secrets map[types.UID]*v1.Secret
Statefulset *appsv1.StatefulSet Statefulset *appsv1.StatefulSet
VolumeClaims map[types.UID]*v1.PersistentVolumeClaim PodDisruptionBudget *policyv1.PodDisruptionBudget
PrimaryPodDisruptionBudget *policyv1.PodDisruptionBudget
CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget
LogicalBackupJob *batchv1.CronJob LogicalBackupJob *batchv1.CronJob
Streams map[string]*zalandov1.FabricEventStream Streams map[string]*zalandov1.FabricEventStream
//Pods are treated separately //Pods are treated separately
//PVCs are treated separately
} }
// Cluster describes postgresql cluster // Cluster describes postgresql cluster
@ -110,13 +109,6 @@ type compareStatefulsetResult struct {
replace bool replace bool
rollingUpdate bool rollingUpdate bool
reasons []string reasons []string
deletedPodAnnotations []string
}
type compareLogicalBackupJobResult struct {
match bool
reasons []string
deletedPodAnnotations []string
} }
// New creates a new cluster. This function should be called from a controller. // New creates a new cluster. This function should be called from a controller.
@ -148,7 +140,6 @@ func New(cfg Config, kubeClient k8sutil.KubernetesClient, pgSpec acidv1.Postgres
Endpoints: make(map[PostgresRole]*v1.Endpoints), Endpoints: make(map[PostgresRole]*v1.Endpoints),
PatroniEndpoints: make(map[string]*v1.Endpoints), PatroniEndpoints: make(map[string]*v1.Endpoints),
PatroniConfigMaps: make(map[string]*v1.ConfigMap), PatroniConfigMaps: make(map[string]*v1.ConfigMap),
VolumeClaims: make(map[types.UID]*v1.PersistentVolumeClaim),
Streams: make(map[string]*zalandov1.FabricEventStream)}, Streams: make(map[string]*zalandov1.FabricEventStream)},
userSyncStrategy: users.DefaultUserSyncStrategy{ userSyncStrategy: users.DefaultUserSyncStrategy{
PasswordEncryption: passwordEncryption, PasswordEncryption: passwordEncryption,
@ -344,10 +335,14 @@ func (c *Cluster) Create() (err error) {
c.logger.Infof("secrets have been successfully created") c.logger.Infof("secrets have been successfully created")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created")
if err = c.createPodDisruptionBudgets(); err != nil { if c.PodDisruptionBudget != nil {
return fmt.Errorf("could not create pod disruption budgets: %v", err) return fmt.Errorf("pod disruption budget already exists in the cluster")
} }
c.logger.Info("pod disruption budgets have been successfully created") pdb, err := c.createPodDisruptionBudget()
if err != nil {
return fmt.Errorf("could not create pod disruption budget: %v", err)
}
c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta))
if c.Statefulset != nil { if c.Statefulset != nil {
return fmt.Errorf("statefulset already exists in the cluster") return fmt.Errorf("statefulset already exists in the cluster")
@ -368,11 +363,6 @@ func (c *Cluster) Create() (err error) {
c.logger.Infof("pods are ready") c.logger.Infof("pods are ready")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "StatefulSet", "Pods are ready")
// sync volume may already transition volumes to gp3, if iops/throughput or type is specified
if err = c.syncVolumes(); err != nil {
return err
}
// sync resources created by Patroni // sync resources created by Patroni
if err = c.syncPatroniResources(); err != nil { if err = c.syncPatroniResources(); err != nil {
c.logger.Warnf("Patroni resources not yet synced: %v", err) c.logger.Warnf("Patroni resources not yet synced: %v", err)
@ -435,7 +425,6 @@ func (c *Cluster) Create() (err error) {
} }
func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult { func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult {
deletedPodAnnotations := []string{}
reasons := make([]string, 0) reasons := make([]string, 0)
var match, needsRollUpdate, needsReplace bool var match, needsRollUpdate, needsReplace bool
@ -450,7 +439,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
needsReplace = true needsReplace = true
reasons = append(reasons, "new statefulset's ownerReferences do not match") reasons = append(reasons, "new statefulset's ownerReferences do not match")
} }
if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations, nil); changed { if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed {
match = false match = false
needsReplace = true needsReplace = true
reasons = append(reasons, "new statefulset's annotations do not match: "+reason) reasons = append(reasons, "new statefulset's annotations do not match: "+reason)
@ -524,7 +513,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
} }
} }
if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations, &deletedPodAnnotations); changed { if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed {
match = false match = false
needsReplace = true needsReplace = true
reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason) reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason)
@ -546,7 +535,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i))
continue continue
} }
if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations, nil); changed { if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations); changed {
needsReplace = true needsReplace = true
reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q do not match the current ones: %s", name, reason)) reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q do not match the current ones: %s", name, reason))
} }
@ -584,7 +573,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa
match = false match = false
} }
return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace, deletedPodAnnotations: deletedPodAnnotations} return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace}
} }
type containerCondition func(a, b v1.Container) bool type containerCondition func(a, b v1.Container) bool
@ -786,7 +775,7 @@ func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool {
return false return false
} }
func (c *Cluster) compareAnnotations(old, new map[string]string, removedList *[]string) (bool, string) { func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) {
reason := "" reason := ""
ignoredAnnotations := make(map[string]bool) ignoredAnnotations := make(map[string]bool)
for _, ignore := range c.OpConfig.IgnoredAnnotations { for _, ignore := range c.OpConfig.IgnoredAnnotations {
@ -799,9 +788,6 @@ func (c *Cluster) compareAnnotations(old, new map[string]string, removedList *[]
} }
if _, ok := new[key]; !ok { if _, ok := new[key]; !ok {
reason += fmt.Sprintf(" Removed %q.", key) reason += fmt.Sprintf(" Removed %q.", key)
if removedList != nil {
*removedList = append(*removedList, key)
}
} }
} }
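
The extra removedList parameter lets compareAnnotations report not just that annotations changed but exactly which keys vanished, so callers can strip those keys from live objects. The core of that pattern in isolation (a sketch without the method's ignore-list handling):

package main

import "fmt"

// removedKeys returns annotation keys present in old but absent in new,
// mirroring how compareAnnotations fills its removedList parameter.
func removedKeys(old, new map[string]string) []string {
	removed := []string{}
	for key := range old {
		if _, ok := new[key]; !ok {
			removed = append(removed, key)
		}
	}
	return removed
}

func main() {
	old := map[string]string{"owner": "acid", "deprecated/flag": "true"}
	cur := map[string]string{"owner": "acid"}
	fmt.Println(removedKeys(old, cur)) // [deprecated/flag]
}
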
@ -841,57 +827,44 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) {
return false, "new service's owner references do not match the current ones" return false, "new service's owner references do not match the current ones"
} }
if !reflect.DeepEqual(old.Spec.Selector, new.Spec.Selector) {
return false, "new service's selector does not match the current one"
}
if old.Spec.ExternalTrafficPolicy != new.Spec.ExternalTrafficPolicy {
return false, "new service's ExternalTrafficPolicy does not match the current one"
}
return true, "" return true, ""
} }
func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) *compareLogicalBackupJobResult { func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) (match bool, reason string) {
deletedPodAnnotations := []string{}
reasons := make([]string, 0)
match := true
if cur.Spec.Schedule != new.Spec.Schedule { if cur.Spec.Schedule != new.Spec.Schedule {
match = false return false, fmt.Sprintf("new job's schedule %q does not match the current one %q",
reasons = append(reasons, fmt.Sprintf("new job's schedule %q does not match the current one %q", new.Spec.Schedule, cur.Spec.Schedule)) new.Spec.Schedule, cur.Spec.Schedule)
} }
newImage := new.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image newImage := new.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image
curImage := cur.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image curImage := cur.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image
if newImage != curImage { if newImage != curImage {
match = false return false, fmt.Sprintf("new job's image %q does not match the current one %q",
reasons = append(reasons, fmt.Sprintf("new job's image %q does not match the current one %q", newImage, curImage)) newImage, curImage)
} }
newPodAnnotation := new.Spec.JobTemplate.Spec.Template.Annotations newPodAnnotation := new.Spec.JobTemplate.Spec.Template.Annotations
curPodAnnotation := cur.Spec.JobTemplate.Spec.Template.Annotations curPodAnnotation := cur.Spec.JobTemplate.Spec.Template.Annotations
if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation, &deletedPodAnnotations); changed { if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation); changed {
match = false return false, fmt.Sprintf("new job's pod template metadata annotations does not match " + reason)
reasons = append(reasons, fmt.Sprint("new job's pod template metadata annotations do not match "+reason))
} }
newPgVersion := getPgVersion(new) newPgVersion := getPgVersion(new)
curPgVersion := getPgVersion(cur) curPgVersion := getPgVersion(cur)
if newPgVersion != curPgVersion { if newPgVersion != curPgVersion {
match = false return false, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q",
reasons = append(reasons, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q", newPgVersion, curPgVersion)) newPgVersion, curPgVersion)
} }
needsReplace := false needsReplace := false
contReasons := make([]string, 0) reasons := make([]string, 0)
needsReplace, contReasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, contReasons) needsReplace, reasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, reasons)
if needsReplace { if needsReplace {
match = false return false, fmt.Sprintf("logical backup container specs do not match: %v", strings.Join(reasons, `', '`))
reasons = append(reasons, fmt.Sprintf("logical backup container specs do not match: %v", strings.Join(contReasons, `', '`)))
} }
return &compareLogicalBackupJobResult{match: match, reasons: reasons, deletedPodAnnotations: deletedPodAnnotations} return true, ""
} }
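
The refactor trades early returns for a result struct that accumulates every mismatch, so a single comparison reports all differences (plus the deleted pod annotations) instead of stopping at the first one. The shape of that pattern, reduced to two fields:

package main

import "fmt"

type compareResult struct {
	match   bool
	reasons []string
}

// compare accumulates all mismatches instead of returning on the first,
// mirroring the compareLogicalBackupJob refactor above.
func compare(curSchedule, newSchedule, curImage, newImage string) *compareResult {
	r := &compareResult{match: true}
	if curSchedule != newSchedule {
		r.match = false
		r.reasons = append(r.reasons, fmt.Sprintf("schedule %q != %q", newSchedule, curSchedule))
	}
	if curImage != newImage {
		r.match = false
		r.reasons = append(r.reasons, fmt.Sprintf("image %q != %q", newImage, curImage))
	}
	return r
}

func main() {
	fmt.Println(compare("30 0 * * *", "30 1 * * *", "backup:v1.14.0", "backup:v1.15.0").reasons)
}
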
func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) { func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) {
@ -902,7 +875,7 @@ func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBud
if !reflect.DeepEqual(new.ObjectMeta.OwnerReferences, cur.ObjectMeta.OwnerReferences) { if !reflect.DeepEqual(new.ObjectMeta.OwnerReferences, cur.ObjectMeta.OwnerReferences) {
return false, "new PDB's owner references do not match the current ones" return false, "new PDB's owner references do not match the current ones"
} }
if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations, nil); changed { if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations); changed {
return false, "new PDB's annotations do not match the current ones:" + reason return false, "new PDB's annotations do not match the current ones:" + reason
} }
return true, "" return true, ""
@ -978,11 +951,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
defer c.mu.Unlock() defer c.mu.Unlock()
c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating) c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating)
if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
// do not apply any major version related changes yet
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
}
c.setSpec(newSpec) c.setSpec(newSpec)
defer func() { defer func() {
@ -1042,27 +1010,20 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// only when streams were not specified in oldSpec but in newSpec // only when streams were not specified in oldSpec but in newSpec
needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0
annotationsChanged, _ := c.compareAnnotations(oldSpec.Annotations, newSpec.Annotations)
initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser
if initUsers {
// if inherited annotations differ secrets have to be synced on update c.logger.Debugf("initialize users")
newAnnotations := c.annotationsSet(nil)
oldAnnotations := make(map[string]string)
for _, secret := range c.Secrets {
oldAnnotations = secret.ObjectMeta.Annotations
break
}
annotationsChanged, _ := c.compareAnnotations(oldAnnotations, newAnnotations, nil)
if initUsers || annotationsChanged {
c.logger.Debug("initialize users")
if err := c.initUsers(); err != nil { if err := c.initUsers(); err != nil {
c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err) c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err)
userInitFailed = true userInitFailed = true
updateFailed = true updateFailed = true
return return
} }
}
c.logger.Debug("syncing secrets") if initUsers || annotationsChanged {
c.logger.Debugf("syncing secrets")
//TODO: mind the secrets of the deleted/new users //TODO: mind the secrets of the deleted/new users
if err := c.syncSecrets(); err != nil { if err := c.syncSecrets(); err != nil {
c.logger.Errorf("could not sync secrets: %v", err) c.logger.Errorf("could not sync secrets: %v", err)
@ -1093,9 +1054,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
} }
} }
// pod disruption budgets // pod disruption budget
if err := c.syncPodDisruptionBudgets(true); err != nil { if err := c.syncPodDisruptionBudget(true); err != nil {
c.logger.Errorf("could not sync pod disruption budgets: %v", err) c.logger.Errorf("could not sync pod disruption budget: %v", err)
updateFailed = true updateFailed = true
} }
@ -1104,7 +1065,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// create if it did not exist // create if it did not exist
if !oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup { if !oldSpec.Spec.EnableLogicalBackup && newSpec.Spec.EnableLogicalBackup {
c.logger.Debug("creating backup cron job") c.logger.Debugf("creating backup cron job")
if err := c.createLogicalBackupJob(); err != nil { if err := c.createLogicalBackupJob(); err != nil {
c.logger.Errorf("could not create a k8s cron job for logical backups: %v", err) c.logger.Errorf("could not create a k8s cron job for logical backups: %v", err)
updateFailed = true updateFailed = true
@ -1114,7 +1075,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// delete if no longer needed // delete if no longer needed
if oldSpec.Spec.EnableLogicalBackup && !newSpec.Spec.EnableLogicalBackup { if oldSpec.Spec.EnableLogicalBackup && !newSpec.Spec.EnableLogicalBackup {
c.logger.Debug("deleting backup cron job") c.logger.Debugf("deleting backup cron job")
if err := c.deleteLogicalBackupJob(); err != nil { if err := c.deleteLogicalBackupJob(); err != nil {
c.logger.Errorf("could not delete a k8s cron job for logical backups: %v", err) c.logger.Errorf("could not delete a k8s cron job for logical backups: %v", err)
updateFailed = true updateFailed = true
@ -1134,7 +1095,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// Roles and Databases // Roles and Databases
if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) { if !userInitFailed && !(c.databaseAccessDisabled() || c.getNumberOfInstances(&c.Spec) <= 0 || c.Spec.StandbyCluster != nil) {
c.logger.Debug("syncing roles") c.logger.Debugf("syncing roles")
if err := c.syncRoles(); err != nil { if err := c.syncRoles(); err != nil {
c.logger.Errorf("could not sync roles: %v", err) c.logger.Errorf("could not sync roles: %v", err)
updateFailed = true updateFailed = true
@ -1168,7 +1129,6 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error {
// streams // streams
if len(newSpec.Spec.Streams) > 0 || len(oldSpec.Spec.Streams) != len(newSpec.Spec.Streams) { if len(newSpec.Spec.Streams) > 0 || len(oldSpec.Spec.Streams) != len(newSpec.Spec.Streams) {
c.logger.Debug("syncing streams")
if err := c.syncStreams(); err != nil { if err := c.syncStreams(); err != nil {
c.logger.Errorf("could not sync streams: %v", err) c.logger.Errorf("could not sync streams: %v", err)
updateFailed = true updateFailed = true
@ -1241,10 +1201,10 @@ func (c *Cluster) Delete() error {
c.logger.Info("not deleting secrets because disabled in configuration") c.logger.Info("not deleting secrets because disabled in configuration")
} }
if err := c.deletePodDisruptionBudgets(); err != nil { if err := c.deletePodDisruptionBudget(); err != nil {
anyErrors = true anyErrors = true
c.logger.Warningf("could not delete pod disruption budgets: %v", err) c.logger.Warningf("could not delete pod disruption budget: %v", err)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budgets: %v", err) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err)
} }
for _, role := range []PostgresRole{Master, Replica} { for _, role := range []PostgresRole{Master, Replica} {
@ -1430,18 +1390,18 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}} preparedSchemas = map[string]acidv1.PreparedSchema{"data": {DefaultRoles: util.True()}}
} }
searchPathArr := []string{constants.DefaultSearchPath} var searchPath strings.Builder
searchPath.WriteString(constants.DefaultSearchPath)
for preparedSchemaName := range preparedSchemas { for preparedSchemaName := range preparedSchemas {
searchPathArr = append(searchPathArr, fmt.Sprintf("%q", preparedSchemaName)) searchPath.WriteString(", " + preparedSchemaName)
} }
searchPath := strings.Join(searchPathArr, ", ")
// default roles per database // default roles per database
if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath, preparedDB.SecretNamespace); err != nil { if err := c.initDefaultRoles(defaultRoles, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
} }
if preparedDB.DefaultUsers { if preparedDB.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath, preparedDB.SecretNamespace); err != nil { if err := c.initDefaultRoles(defaultUsers, "admin", preparedDbName, searchPath.String(), preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err) return fmt.Errorf("could not initialize default roles for database %s: %v", preparedDbName, err)
} }
} }
@ -1452,16 +1412,14 @@ func (c *Cluster) initPreparedDatabaseRoles() error {
if err := c.initDefaultRoles(defaultRoles, if err := c.initDefaultRoles(defaultRoles,
preparedDbName+constants.OwnerRoleNameSuffix, preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName, preparedDbName+"_"+preparedSchemaName,
fmt.Sprintf("%s, %q", constants.DefaultSearchPath, preparedSchemaName), constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err) return fmt.Errorf("could not initialize default roles for database schema %s: %v", preparedSchemaName, err)
} }
if preparedSchema.DefaultUsers { if preparedSchema.DefaultUsers {
if err := c.initDefaultRoles(defaultUsers, if err := c.initDefaultRoles(defaultUsers,
preparedDbName+constants.OwnerRoleNameSuffix, preparedDbName+constants.OwnerRoleNameSuffix,
preparedDbName+"_"+preparedSchemaName, preparedDbName+"_"+preparedSchemaName,
fmt.Sprintf("%s, %q", constants.DefaultSearchPath, preparedSchemaName), constants.DefaultSearchPath+", "+preparedSchemaName, preparedDB.SecretNamespace); err != nil {
preparedDB.SecretNamespace); err != nil {
return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err) return fmt.Errorf("could not initialize default users for database schema %s: %v", preparedSchemaName, err)
} }
} }
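
Quoting each schema with %q matters because unquoted search_path entries are down-cased by Postgres, which breaks mixed-case schema names and reserved words. A minimal illustration, with a stand-in for constants.DefaultSearchPath since its exact value isn't shown here:

package main

import (
	"fmt"
	"strings"
)

// buildSearchPath quotes every prepared schema, as the rewritten
// initPreparedDatabaseRoles does; `"$user", public` stands in for
// constants.DefaultSearchPath in this sketch.
func buildSearchPath(schemas []string) string {
	parts := []string{`"$user", public`}
	for _, s := range schemas {
		parts = append(parts, fmt.Sprintf("%q", s))
	}
	return strings.Join(parts, ", ")
}

func main() {
	fmt.Println(buildSearchPath([]string{"data", "History"}))
	// "$user", public, "data", "History"
}
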
@ -1751,8 +1709,7 @@ func (c *Cluster) GetStatus() *ClusterStatus {
MasterService: c.GetServiceMaster(), MasterService: c.GetServiceMaster(),
ReplicaService: c.GetServiceReplica(), ReplicaService: c.GetServiceReplica(),
StatefulSet: c.GetStatefulSet(), StatefulSet: c.GetStatefulSet(),
PrimaryPodDisruptionBudget: c.GetPrimaryPodDisruptionBudget(), PodDisruptionBudget: c.GetPodDisruptionBudget(),
CriticalOpPodDisruptionBudget: c.GetCriticalOpPodDisruptionBudget(),
CurrentProcess: c.GetCurrentProcess(), CurrentProcess: c.GetCurrentProcess(),
Error: fmt.Errorf("error: %s", c.Error), Error: fmt.Errorf("error: %s", c.Error),
@ -1766,58 +1723,18 @@ func (c *Cluster) GetStatus() *ClusterStatus {
	return status
}

-func (c *Cluster) GetSwitchoverSchedule() string {
-	var possibleSwitchover, schedule time.Time
-
-	now := time.Now().UTC()
-	for _, window := range c.Spec.MaintenanceWindows {
-		// in the best case it is possible today
-		possibleSwitchover = time.Date(now.Year(), now.Month(), now.Day(), window.StartTime.Hour(), window.StartTime.Minute(), 0, 0, time.UTC)
-		if window.Everyday {
-			if now.After(possibleSwitchover) {
-				// we are already past the time for today, try tomorrow
-				possibleSwitchover = possibleSwitchover.AddDate(0, 0, 1)
-			}
-		} else {
-			if now.Weekday() != window.Weekday {
-				// get closest possible time for this window
-				possibleSwitchover = possibleSwitchover.AddDate(0, 0, int((7+window.Weekday-now.Weekday())%7))
-			} else if now.After(possibleSwitchover) {
-				// we are already past the time for today, try next week
-				possibleSwitchover = possibleSwitchover.AddDate(0, 0, 7)
-			}
-		}
-
-		if (schedule.Equal(time.Time{})) || possibleSwitchover.Before(schedule) {
-			schedule = possibleSwitchover
-		}
-	}
-	return schedule.Format("2006-01-02T15:04+00")
-}
// Switchover does a switchover (via Patroni) to a candidate pod
-func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName, scheduled bool) error {
-	var err error
+func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error {
+	var err error
+	c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
+	c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate)
	stopCh := make(chan struct{})
	ch := c.registerPodSubscriber(candidate)
	defer c.unregisterPodSubscriber(candidate)
	defer close(stopCh)

-	var scheduled_at string
-	if scheduled {
-		scheduled_at = c.GetSwitchoverSchedule()
-	} else {
-		c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate)
-		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate)
-		scheduled_at = ""
-	}
-
-	if err = c.patroni.Switchover(curMaster, candidate.Name, scheduled_at); err == nil {
-		if scheduled {
-			c.logger.Infof("switchover from %q to %q is scheduled at %s", curMaster.Name, candidate, scheduled_at)
-			return nil
-		}
+	if err = c.patroni.Switchover(curMaster, candidate.Name); err == nil {
		c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate)
		c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate)
		_, err = c.waitForPodLabel(ch, stopCh, nil)
@ -1825,9 +1742,6 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName, s
err = fmt.Errorf("could not get master pod label: %v", err) err = fmt.Errorf("could not get master pod label: %v", err)
} }
} else { } else {
if scheduled {
return fmt.Errorf("could not schedule switchover: %v", err)
}
err = fmt.Errorf("could not switch over from %q to %q: %v", curMaster.Name, candidate, err) err = fmt.Errorf("could not switch over from %q to %q: %v", curMaster.Name, candidate, err)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err)
} }
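GetSwitchoverSchedule, present only on the master side above, resolves the cluster's maintenance windows to the earliest upcoming slot and formats it as a minute-precision UTC timestamp, which is then handed to Patroni as the scheduled switchover time. A minimal standalone sketch of the same date arithmetic; the window struct here is a simplified, illustrative stand-in for the operator's MaintenanceWindow type, not its actual API:

package main

import (
	"fmt"
	"time"
)

// window is a trimmed-down stand-in for acidv1.MaintenanceWindow.
type window struct {
	everyday bool
	weekday  time.Weekday
	start    time.Time // only hour and minute are used
}

// nextSwitchover returns the earliest upcoming window start in UTC.
func nextSwitchover(now time.Time, windows []window) time.Time {
	var schedule time.Time
	for _, w := range windows {
		// candidate: today at the window's start time
		t := time.Date(now.Year(), now.Month(), now.Day(), w.start.Hour(), w.start.Minute(), 0, 0, time.UTC)
		if w.everyday {
			if now.After(t) {
				t = t.AddDate(0, 0, 1) // past today's slot, use tomorrow
			}
		} else if now.Weekday() != w.weekday {
			t = t.AddDate(0, 0, int((7+w.weekday-now.Weekday())%7)) // next matching weekday
		} else if now.After(t) {
			t = t.AddDate(0, 0, 7) // past today's slot, use next week
		}
		if schedule.IsZero() || t.Before(schedule) {
			schedule = t // keep the earliest candidate across all windows
		}
	}
	return schedule
}

func main() {
	now := time.Date(2024, 8, 14, 12, 0, 0, 0, time.UTC) // a Wednesday, 12:00 UTC
	ws := []window{{everyday: true, start: time.Date(0, 1, 1, 10, 0, 0, 0, time.UTC)}}
	// 10:00 already passed today, so the next everyday slot is tomorrow.
	fmt.Println(nextSwitchover(now, ws).Format("2006-01-02T15:04+00")) // 2024-08-15T10:00+00
}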

View File

@ -71,11 +71,11 @@ var cl = New(
	Spec: acidv1.PostgresSpec{
		EnableConnectionPooler: util.True(),
		Streams: []acidv1.Stream{
-			{
+			acidv1.Stream{
				ApplicationId: "test-app",
				Database:      "test_db",
				Tables: map[string]acidv1.StreamTable{
-					"test_table": {
+					"test_table": acidv1.StreamTable{
						EventType: "test-app.test",
					},
				},
@ -95,7 +95,6 @@ func TestCreate(t *testing.T) {
	client := k8sutil.KubernetesClient{
		DeploymentsGetter:            clientSet.AppsV1(),
-		CronJobsGetter:               clientSet.BatchV1(),
		EndpointsGetter:              clientSet.CoreV1(),
		PersistentVolumeClaimsGetter: clientSet.CoreV1(),
		PodDisruptionBudgetsGetter:   clientSet.PolicyV1(),
@ -112,7 +111,6 @@ func TestCreate(t *testing.T) {
			Namespace: clusterNamespace,
		},
		Spec: acidv1.PostgresSpec{
-			EnableLogicalBackup: true,
			Volume: acidv1.Volume{
				Size: "1Gi",
			},
@ -1341,21 +1339,14 @@ func TestCompareEnv(t *testing.T) {
	}
}

-func newService(
-	annotations map[string]string,
-	svcType v1.ServiceType,
-	sourceRanges []string,
-	selector map[string]string,
-	policy v1.ServiceExternalTrafficPolicyType) *v1.Service {
+func newService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service {
	svc := &v1.Service{
		Spec: v1.ServiceSpec{
-			Selector:                 selector,
-			Type:                     svcType,
-			LoadBalancerSourceRanges: sourceRanges,
-			ExternalTrafficPolicy:    policy,
+			Type:                     svcT,
+			LoadBalancerSourceRanges: lbSr,
		},
	}
-	svc.Annotations = annotations
+	svc.Annotations = ann
	return svc
}
@ -1372,18 +1363,13 @@ func TestCompareServices(t *testing.T) {
		},
	}

-	defaultPolicy := v1.ServiceExternalTrafficPolicyTypeCluster
	serviceWithOwnerReference := newService(
		map[string]string{
			constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
			constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
		},
		v1.ServiceTypeClusterIP,
-		[]string{"128.141.0.0/16", "137.138.0.0/16"},
-		nil,
-		defaultPolicy,
-	)
+		[]string{"128.141.0.0/16", "137.138.0.0/16"})
	ownerRef := metav1.OwnerReference{
		APIVersion: "acid.zalan.do/v1",
@ -1409,16 +1395,14 @@ func TestCompareServices(t *testing.T) {
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeClusterIP,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			new: newService(
				map[string]string{
					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeClusterIP,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			match: true,
		},
		{
@ -1429,16 +1413,14 @@ func TestCompareServices(t *testing.T) {
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeClusterIP,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			new: newService(
				map[string]string{
					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeLoadBalancer,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			match:  false,
			reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`,
		},
@ -1450,16 +1432,14 @@ func TestCompareServices(t *testing.T) {
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeLoadBalancer,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			new: newService(
				map[string]string{
					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeLoadBalancer,
-				[]string{"185.249.56.0/22"},
-				nil, defaultPolicy),
+				[]string{"185.249.56.0/22"}),
			match:  false,
			reason: `new service's LoadBalancerSourceRange does not match the current one`,
		},
@ -1471,16 +1451,14 @@ func TestCompareServices(t *testing.T) {
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeLoadBalancer,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			new: newService(
				map[string]string{
					constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do",
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeLoadBalancer,
-				[]string{},
-				nil, defaultPolicy),
+				[]string{}),
			match:  false,
			reason: `new service's LoadBalancerSourceRange does not match the current one`,
		},
@ -1492,39 +1470,10 @@ func TestCompareServices(t *testing.T) {
					constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue,
				},
				v1.ServiceTypeClusterIP,
-				[]string{"128.141.0.0/16", "137.138.0.0/16"},
-				nil, defaultPolicy),
+				[]string{"128.141.0.0/16", "137.138.0.0/16"}),
			new:   serviceWithOwnerReference,
			match: false,
		},
-		{
-			about: "new service has a label selector",
-			current: newService(
-				map[string]string{},
-				v1.ServiceTypeClusterIP,
-				[]string{},
-				nil, defaultPolicy),
-			new: newService(
-				map[string]string{},
-				v1.ServiceTypeClusterIP,
-				[]string{},
-				map[string]string{"cluster-name": "clstr", "spilo-role": "master"}, defaultPolicy),
-			match: false,
-		},
-		{
-			about: "services differ on external traffic policy",
-			current: newService(
-				map[string]string{},
-				v1.ServiceTypeClusterIP,
-				[]string{},
-				nil, defaultPolicy),
-			new: newService(
-				map[string]string{},
-				v1.ServiceTypeClusterIP,
-				[]string{},
-				nil, v1.ServiceExternalTrafficPolicyTypeLocal),
-			match: false,
-		},
	}

	for _, tt := range tests {
@ -1555,7 +1504,7 @@ func newCronJob(image, schedule string, vars []v1.EnvVar, mounts []v1.VolumeMoun
			Template: v1.PodTemplateSpec{
				Spec: v1.PodSpec{
					Containers: []v1.Container{
-						{
+						v1.Container{
							Name:  "logical-backup",
							Image: image,
							Env:   vars,
@ -1729,20 +1678,12 @@ func TestCompareLogicalBackupJob(t *testing.T) {
		}
	}

-			cmp := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob)
-			if cmp.match != tt.match {
-				t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), cmp.match, currentCronJob, desiredCronJob)
-			} else if !cmp.match {
-				found := false
-				for _, reason := range cmp.reasons {
-					if strings.HasPrefix(reason, tt.reason) {
-						found = true
-						break
-					}
-					found = false
-				}
-				if !found {
-					t.Errorf("%s - expected reason prefix %s, not found in %#v", t.Name(), tt.reason, cmp.reasons)
-				}
+			match, reason := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob)
+			if match != tt.match {
+				t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), match, currentCronJob, desiredCronJob)
+			} else {
+				if !strings.HasPrefix(reason, tt.reason) {
+					t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason)
+				}
			}
		})
@ -2114,91 +2055,3 @@ func TestCompareVolumeMounts(t *testing.T) {
		})
	}
}

-func TestGetSwitchoverSchedule(t *testing.T) {
-	now := time.Now()
-
-	futureTimeStart := now.Add(1 * time.Hour)
-	futureWindowTimeStart := futureTimeStart.Format("15:04")
-	futureWindowTimeEnd := now.Add(2 * time.Hour).Format("15:04")
-	pastTimeStart := now.Add(-2 * time.Hour)
-	pastWindowTimeStart := pastTimeStart.Format("15:04")
-	pastWindowTimeEnd := now.Add(-1 * time.Hour).Format("15:04")
-
-	tests := []struct {
-		name     string
-		windows  []acidv1.MaintenanceWindow
-		expected string
-	}{
-		{
-			name: "everyday maintenance windows is later today",
-			windows: []acidv1.MaintenanceWindow{
-				{
-					Everyday:  true,
-					StartTime: mustParseTime(futureWindowTimeStart),
-					EndTime:   mustParseTime(futureWindowTimeEnd),
-				},
-			},
-			expected: futureTimeStart.Format("2006-01-02T15:04+00"),
-		},
-		{
-			name: "everyday maintenance window is tomorrow",
-			windows: []acidv1.MaintenanceWindow{
-				{
-					Everyday:  true,
-					StartTime: mustParseTime(pastWindowTimeStart),
-					EndTime:   mustParseTime(pastWindowTimeEnd),
-				},
-			},
-			expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"),
-		},
-		{
-			name: "weekday maintenance windows is later today",
-			windows: []acidv1.MaintenanceWindow{
-				{
-					Weekday:   now.Weekday(),
-					StartTime: mustParseTime(futureWindowTimeStart),
-					EndTime:   mustParseTime(futureWindowTimeEnd),
-				},
-			},
-			expected: futureTimeStart.Format("2006-01-02T15:04+00"),
-		},
-		{
-			name: "weekday maintenance windows is passed for today",
-			windows: []acidv1.MaintenanceWindow{
-				{
-					Weekday:   now.Weekday(),
-					StartTime: mustParseTime(pastWindowTimeStart),
-					EndTime:   mustParseTime(pastWindowTimeEnd),
-				},
-			},
-			expected: pastTimeStart.AddDate(0, 0, 7).Format("2006-01-02T15:04+00"),
-		},
-		{
-			name: "choose the earliest window",
-			windows: []acidv1.MaintenanceWindow{
-				{
-					Weekday:   now.AddDate(0, 0, 2).Weekday(),
-					StartTime: mustParseTime(futureWindowTimeStart),
-					EndTime:   mustParseTime(futureWindowTimeEnd),
-				},
-				{
-					Everyday:  true,
-					StartTime: mustParseTime(pastWindowTimeStart),
-					EndTime:   mustParseTime(pastWindowTimeEnd),
-				},
-			},
-			expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"),
-		},
-	}
-
-	for _, tt := range tests {
-		t.Run(tt.name, func(t *testing.T) {
-			cluster.Spec.MaintenanceWindows = tt.windows
-			schedule := cluster.GetSwitchoverSchedule()
-			if schedule != tt.expected {
-				t.Errorf("Expected GetSwitchoverSchedule to return %s, returned: %s", tt.expected, schedule)
-			}
-		})
-	}
-}

View File

@ -2,7 +2,6 @@ package cluster
import (
	"context"
-	"encoding/json"
	"fmt"
	"reflect"
	"strings"
@ -592,7 +591,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
	// Lack of connection pooler objects is not a fatal error, just log it if
	// it was present before in the manifest
	if c.ConnectionPooler[role] == nil || role == "" {
-		c.logger.Debug("no connection pooler to delete")
+		c.logger.Debugf("no connection pooler to delete")
		return nil
	}
@ -623,7 +622,7 @@ func (c *Cluster) deleteConnectionPooler(role PostgresRole) (err error) {
	// Repeat the same for the service object
	service := c.ConnectionPooler[role].Service
	if service == nil {
-		c.logger.Debug("no connection pooler service object to delete")
+		c.logger.Debugf("no connection pooler service object to delete")
	} else {
		err = c.KubeClient.
@ -978,7 +977,6 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
		err        error
	)

-	updatedPodAnnotations := map[string]*string{}
	syncReason := make([]string, 0)
	deployment, err = c.KubeClient.
		Deployments(c.Namespace).
@ -1040,27 +1038,9 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
	}

	newPodAnnotations := c.annotationsSet(c.generatePodAnnotations(&c.Spec))
-	deletedPodAnnotations := []string{}
-	if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations, &deletedPodAnnotations); changed {
+	if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations); changed {
		specSync = true
		syncReason = append(syncReason, []string{"new connection pooler's pod template annotations do not match the current ones: " + reason}...)
-
-		for _, anno := range deletedPodAnnotations {
-			updatedPodAnnotations[anno] = nil
-		}
-		templateMetadataReq := map[string]map[string]map[string]map[string]map[string]*string{
-			"spec": {"template": {"metadata": {"annotations": updatedPodAnnotations}}}}
-		patch, err := json.Marshal(templateMetadataReq)
-		if err != nil {
-			return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pod template: %v", role, err)
-		}
-		deployment, err = c.KubeClient.Deployments(c.Namespace).Patch(context.TODO(),
-			deployment.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "")
-		if err != nil {
-			c.logger.Errorf("failed to patch %s connection pooler's pod template: %v", role, err)
-			return nil, err
-		}
		deployment.Spec.Template.Annotations = newPodAnnotations
	}
@ -1084,7 +1064,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
	}

	newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(nil)) // including the downscaling annotations
-	if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations, nil); changed {
+	if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations); changed {
		deployment, err = patchConnectionPoolerAnnotations(c.KubeClient, deployment, newAnnotations)
		if err != nil {
			return nil, err
@ -1118,20 +1098,14 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql
			if err != nil {
				return nil, fmt.Errorf("could not delete pooler pod: %v", err)
			}
-		} else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations, nil); changed {
-			metadataReq := map[string]map[string]map[string]*string{"metadata": {}}
-
-			for anno, val := range deployment.Spec.Template.Annotations {
-				updatedPodAnnotations[anno] = &val
-			}
-			metadataReq["metadata"]["annotations"] = updatedPodAnnotations
-			patch, err := json.Marshal(metadataReq)
-			if err != nil {
-				return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pods: %v", role, err)
-			}
-			_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{})
-			if err != nil {
-				return nil, fmt.Errorf("could not patch annotations for %s connection pooler's pod %q: %v", role, pod.Name, err)
-			}
+		} else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations); changed {
+			patchData, err := metaAnnotationsPatch(deployment.Spec.Template.Annotations)
+			if err != nil {
+				return nil, fmt.Errorf("could not form patch for pooler's pod annotations: %v", err)
+			}
+			_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+			if err != nil {
+				return nil, fmt.Errorf("could not patch annotations for pooler's pod %q: %v", pod.Name, err)
+			}
		}
	}
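The master side builds its pod patches by hand as a map of *string values: a nil pointer marshals to JSON null, and in a merge or strategic merge patch a null value deletes that key, which is how annotations dropped from the manifest get cleaned off running pods. A small self-contained sketch of that encoding (the key names are made up for illustration):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// nil *string values marshal to JSON null, which a merge patch
	// interprets as "delete this key".
	annotations := map[string]*string{}
	keep := "v2"
	annotations["owned-by"] = &keep // set or overwrite
	annotations["stale"] = nil      // delete

	patch, _ := json.Marshal(map[string]interface{}{
		"metadata": map[string]interface{}{"annotations": annotations},
	})
	fmt.Println(string(patch))
	// {"metadata":{"annotations":{"owned-by":"v2","stale":null}}}
}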

View File

@ -969,7 +969,7 @@ func TestPoolerTLS(t *testing.T) {
			TLS: &acidv1.TLSDescription{
				SecretName: tlsSecretName, CAFile: "ca.crt"},
			AdditionalVolumes: []acidv1.AdditionalVolume{
-				{
+				acidv1.AdditionalVolume{
					Name:      tlsSecretName,
					MountPath: mountPath,
					VolumeSource: v1.VolumeSource{

View File

@ -111,7 +111,7 @@ func (c *Cluster) pgConnectionString(dbname string) string {
func (c *Cluster) databaseAccessDisabled() bool {
	if !c.OpConfig.EnableDBAccess {
-		c.logger.Debug("database access is disabled")
+		c.logger.Debugf("database access is disabled")
	}
	return !c.OpConfig.EnableDBAccess

View File

@ -4,9 +4,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"maps"
"path" "path"
"slices"
"sort" "sort"
"strings" "strings"
@ -14,16 +12,19 @@ import (
"github.com/sirupsen/logrus" "github.com/sirupsen/logrus"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors" apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource" "k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr" "k8s.io/apimachinery/pkg/util/intstr"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
batchv1 "k8s.io/api/batch/v1"
"k8s.io/apimachinery/pkg/labels"
acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1"
"github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util"
@ -108,15 +109,10 @@ func (c *Cluster) servicePort(role PostgresRole) int32 {
	return pgPort
}

-func (c *Cluster) PrimaryPodDisruptionBudgetName() string {
+func (c *Cluster) podDisruptionBudgetName() string {
	return c.OpConfig.PDBNameFormat.Format("cluster", c.Name)
}

-func (c *Cluster) criticalOpPodDisruptionBudgetName() string {
-	pdbTemplate := config.StringTemplate("postgres-{cluster}-critical-op-pdb")
-	return pdbTemplate.Format("cluster", c.Name)
-}
-
func makeDefaultResources(config *config.Config) acidv1.Resources {

	defaultRequests := acidv1.ResourceDescription{
@ -170,7 +166,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e
		if isSmaller {
			msg = fmt.Sprintf("defined CPU limit %s for %q container is below required minimum %s and will be increased",
				cpuLimit.String(), constants.PostgresContainerName, minCPULimit)
-			c.logger.Warningf("%s", msg)
+			c.logger.Warningf(msg)
			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
			resources.Limits[v1.ResourceCPU], _ = resource.ParseQuantity(minCPULimit)
		}
@ -187,7 +183,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e
		if isSmaller {
			msg = fmt.Sprintf("defined memory limit %s for %q container is below required minimum %s and will be increased",
				memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit)
-			c.logger.Warningf("%s", msg)
+			c.logger.Warningf(msg)
			c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg)
			resources.Limits[v1.ResourceMemory], _ = resource.ParseQuantity(minMemoryLimit)
		}
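Wrapping msg in "%s" on the master side silences go vet's printf check: passing a non-constant string as the format argument is flagged because any stray % verb inside it would be misinterpreted. A minimal illustration, using the standard log package for brevity:

package main

import "log"

func main() {
	msg := "usage is at 100%" // contains a literal %
	// log.Printf(msg)        // vet: non-constant format string; would print "100%!(NOVERB)"
	log.Printf("%s", msg) // safe: msg is treated as data, not a format string
}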
@ -523,14 +519,13 @@ func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinit
			},
		}
	} else {
-		switch c.OpConfig.NodeReadinessLabelMerge {
-		case "OR":
+		if c.OpConfig.NodeReadinessLabelMerge == "OR" {
			manifestTerms := nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms
			manifestTerms = append(manifestTerms, nodeReadinessSelectorTerm)
			nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{
				NodeSelectorTerms: manifestTerms,
			}
-		case "AND":
+		} else if c.OpConfig.NodeReadinessLabelMerge == "AND" {
			for i, nodeSelectorTerm := range nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms {
				manifestExpressions := nodeSelectorTerm.MatchExpressions
				manifestExpressions = append(manifestExpressions, matchExpressions...)
@ -744,7 +739,7 @@ func (c *Cluster) generateSidecarContainers(sidecars []acidv1.Sidecar,
}

// adds common fields to sidecars
-func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string) []v1.Container {
+func patchSidecarContainers(in []v1.Container, volumeMounts []v1.VolumeMount, superUserName string, credentialsSecretName string, logger *logrus.Entry) []v1.Container {
	result := []v1.Container{}
	for _, container := range in {
@ -1010,9 +1005,6 @@ func (c *Cluster) generateSpiloPodEnvVars(
	if c.patroniUsesKubernetes() {
		envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"})
-		if c.OpConfig.EnablePodDisruptionBudget != nil && *c.OpConfig.EnablePodDisruptionBudget {
-			envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_BOOTSTRAP_LABELS", Value: "{\"critical-operation\":\"true\"}"})
-		}
	} else {
		envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost})
	}
@ -1230,7 +1222,6 @@ func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.Resour
		Resources: *resources,
		Env:       sidecar.Env,
		Ports:     sidecar.Ports,
-		Command:   sidecar.Command,
	}
}
@ -1298,7 +1289,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
return nil, fmt.Errorf("could not generate resource requirements: %v", err) return nil, fmt.Errorf("could not generate resource requirements: %v", err)
} }
if len(spec.InitContainers) > 0 { if spec.InitContainers != nil && len(spec.InitContainers) > 0 {
if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) { if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail") c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail")
} }
@ -1401,7 +1392,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	// generate container specs for sidecars specified in the cluster manifest
	clusterSpecificSidecars := []v1.Container{}
-	if len(spec.Sidecars) > 0 {
+	if spec.Sidecars != nil && len(spec.Sidecars) > 0 {
		// warn if sidecars are defined, but globally disabled (does not apply to globally defined sidecars)
		if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
			c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail")
@ -1453,7 +1444,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
				containerName, containerName)
	}

-	sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername))
+	sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername), c.logger)

	tolerationSpec := tolerations(&spec.Tolerations, c.OpConfig.PodToleration)
	effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName)
@ -1505,12 +1496,11 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef
	updateStrategy := appsv1.StatefulSetUpdateStrategy{Type: appsv1.OnDeleteStatefulSetStrategyType}

	var podManagementPolicy appsv1.PodManagementPolicyType
-	switch c.OpConfig.PodManagementPolicy {
-	case "ordered_ready":
+	if c.OpConfig.PodManagementPolicy == "ordered_ready" {
		podManagementPolicy = appsv1.OrderedReadyPodManagement
-	case "parallel":
+	} else if c.OpConfig.PodManagementPolicy == "parallel" {
		podManagementPolicy = appsv1.ParallelPodManagement
-	default:
+	} else {
		return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy)
	}
@ -1608,7 +1598,7 @@ func (c *Cluster) generatePodAnnotations(spec *acidv1.PostgresSpec) map[string]s
	for k, v := range c.OpConfig.CustomPodAnnotations {
		annotations[k] = v
	}
-	if spec.PodAnnotations != nil {
+	if spec != nil || spec.PodAnnotations != nil {
		for k, v := range spec.PodAnnotations {
			annotations[k] = v
		}
	}
@ -1869,7 +1859,7 @@ func (c *Cluster) generatePersistentVolumeClaimTemplate(volumeSize, volumeStorag
		},
		Spec: v1.PersistentVolumeClaimSpec{
			AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
-			Resources: v1.VolumeResourceRequirements{
+			Resources: v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceStorage: quantity,
				},
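The type swap follows the upstream API: client-go v0.29 (Kubernetes 1.29) changed PersistentVolumeClaimSpec.Resources from the general v1.ResourceRequirements to the narrower v1.VolumeResourceRequirements, which drops the Claims field. A sketch of the newer shape, assuming client-go >= v0.29 is on the module path:

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	quantity := resource.MustParse("1Gi")
	spec := v1.PersistentVolumeClaimSpec{
		AccessModes: []v1.PersistentVolumeAccessMode{v1.ReadWriteOnce},
		// VolumeResourceRequirements requires client-go >= v0.29; older
		// releases used v1.ResourceRequirements in this position.
		Resources: v1.VolumeResourceRequirements{
			Requests: v1.ResourceList{v1.ResourceStorage: quantity},
		},
	}
	fmt.Println(spec.Resources.Requests.Storage()) // 1Gi
}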
@ -1885,16 +1875,18 @@ func (c *Cluster) generatePersistentVolumeClaimTemplate(volumeSize, volumeStorag
func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
	secrets := make(map[string]*v1.Secret, len(c.pgUsers)+len(c.systemUsers))
+	namespace := c.Namespace
	for username, pgUser := range c.pgUsers {
		//Skip users with no password i.e. human users (they'll be authenticated using pam)
-		secret := c.generateSingleUserSecret(pgUser)
+		secret := c.generateSingleUserSecret(pgUser.Namespace, pgUser)
		if secret != nil {
			secrets[username] = secret
		}
+		namespace = pgUser.Namespace
	}
	/* special case for the system user */
	for _, systemUser := range c.systemUsers {
-		secret := c.generateSingleUserSecret(systemUser)
+		secret := c.generateSingleUserSecret(namespace, systemUser)
		if secret != nil {
			secrets[systemUser.Name] = secret
		}
@ -1903,7 +1895,7 @@ func (c *Cluster) generateUserSecrets() map[string]*v1.Secret {
	return secrets
}

-func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret {
+func (c *Cluster) generateSingleUserSecret(namespace string, pgUser spec.PgUser) *v1.Secret {
	//Skip users with no password i.e. human users (they'll be authenticated using pam)
	if pgUser.Password == "" {
		if pgUser.Origin != spec.RoleOriginTeamsAPI {
@ -1929,7 +1921,7 @@ func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret {
	// if secret lives in another namespace we cannot set ownerReferences
	var ownerReferences []metav1.OwnerReference
-	if c.Config.OpConfig.EnableCrossNamespaceSecret && c.Postgresql.ObjectMeta.Namespace != pgUser.Namespace {
+	if c.Config.OpConfig.EnableCrossNamespaceSecret && strings.Contains(username, ".") {
		ownerReferences = nil
	} else {
		ownerReferences = c.ownerReferences()
@ -2216,7 +2208,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript
	return result
}

-func (c *Cluster) generatePrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget {
+func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget {
	minAvailable := intstr.FromInt(1)
	pdbEnabled := c.OpConfig.EnablePodDisruptionBudget
	pdbMasterLabelSelector := c.OpConfig.PDBMasterLabelSelector
@ -2234,36 +2226,7 @@ func (c *Cluster) generatePrimaryPodDisruptionBudget() *policyv1.PodDisruptionBu
	return &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{
-			Name:            c.PrimaryPodDisruptionBudgetName(),
+			Name:        c.podDisruptionBudgetName(),
-			Namespace:       c.Namespace,
-			Labels:          c.labelsSet(true),
-			Annotations:     c.annotationsSet(nil),
-			OwnerReferences: c.ownerReferences(),
-		},
-		Spec: policyv1.PodDisruptionBudgetSpec{
-			MinAvailable: &minAvailable,
-			Selector: &metav1.LabelSelector{
-				MatchLabels: labels,
-			},
-		},
-	}
-}
-
-func (c *Cluster) generateCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget {
-	minAvailable := intstr.FromInt32(c.Spec.NumberOfInstances)
-	pdbEnabled := c.OpConfig.EnablePodDisruptionBudget
-
-	// if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0.
-	if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 {
-		minAvailable = intstr.FromInt(0)
-	}
-
-	labels := c.labelsSet(false)
-	labels["critical-operation"] = "true"
-	return &policyv1.PodDisruptionBudget{
-		ObjectMeta: metav1.ObjectMeta{
-			Name:        c.criticalOpPodDisruptionBudgetName(),
			Namespace:   c.Namespace,
			Labels:      c.labelsSet(true),
			Annotations: c.annotationsSet(nil),
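Master splits disruption protection into two budgets: the primary PDB keeps the pod carrying the spilo-role=master label from eviction, and a second critical-op PDB pins all instances (minAvailable = NumberOfInstances) while pods carry a critical-operation=true label, for example during a major version upgrade. A rough sketch of the second budget's shape, with illustrative field values rather than the operator's exact construction:

package main

import (
	"fmt"

	policyv1 "k8s.io/api/policy/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// criticalOpPDB blocks eviction of all instances while they are labeled
// as running a critical operation; a zero budget disables the protection.
func criticalOpPDB(clusterName string, instances int32, enabled bool) *policyv1.PodDisruptionBudget {
	minAvailable := intstr.FromInt32(instances)
	if !enabled || instances <= 0 {
		minAvailable = intstr.FromInt32(0)
	}
	return &policyv1.PodDisruptionBudget{
		ObjectMeta: metav1.ObjectMeta{Name: "postgres-" + clusterName + "-critical-op-pdb"},
		Spec: policyv1.PodDisruptionBudgetSpec{
			MinAvailable: &minAvailable,
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{
					"cluster-name":       clusterName,
					"critical-operation": "true",
				},
			},
		},
	}
}

func main() {
	pdb := criticalOpPDB("demo", 3, true)
	fmt.Println(pdb.Name, pdb.Spec.MinAvailable)
}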

View File

@ -72,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
	}{
		{
			subtest: "Patroni default configuration",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
			patroni: &acidv1.Patroni{},
			opConfig: &config.Config{
				Auth: config.Auth{
					PamRoleName: "zalandos",
				},
			},
-			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
+			result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`,
		},
		{
			subtest: "Patroni configured",
-			pgParam: &acidv1.PostgresqlParam{PgVersion: "17"},
+			pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
			patroni: &acidv1.Patroni{
				InitDB: map[string]string{
					"encoding": "UTF8",
"encoding": "UTF8", "encoding": "UTF8",
@ -102,38 +102,38 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) {
FailsafeMode: util.True(), FailsafeMode: util.True(),
}, },
opConfig: &config.Config{}, opConfig: &config.Config{},
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`, result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`,
}, },
{ {
subtest: "Patroni failsafe_mode configured globally", subtest: "Patroni failsafe_mode configured globally",
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
patroni: &acidv1.Patroni{}, patroni: &acidv1.Patroni{},
opConfig: &config.Config{ opConfig: &config.Config{
EnablePatroniFailsafeMode: util.True(), EnablePatroniFailsafeMode: util.True(),
}, },
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
}, },
{ {
subtest: "Patroni failsafe_mode configured globally, disabled for cluster", subtest: "Patroni failsafe_mode configured globally, disabled for cluster",
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
patroni: &acidv1.Patroni{ patroni: &acidv1.Patroni{
FailsafeMode: util.False(), FailsafeMode: util.False(),
}, },
opConfig: &config.Config{ opConfig: &config.Config{
EnablePatroniFailsafeMode: util.True(), EnablePatroniFailsafeMode: util.True(),
}, },
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`, result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`,
}, },
{ {
subtest: "Patroni failsafe_mode disabled globally, configured for cluster", subtest: "Patroni failsafe_mode disabled globally, configured for cluster",
pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, pgParam: &acidv1.PostgresqlParam{PgVersion: "16"},
patroni: &acidv1.Patroni{ patroni: &acidv1.Patroni{
FailsafeMode: util.True(), FailsafeMode: util.True(),
}, },
opConfig: &config.Config{ opConfig: &config.Config{
EnablePatroniFailsafeMode: util.False(), EnablePatroniFailsafeMode: util.False(),
}, },
result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`,
}, },
} }
for _, tt := range tests { for _, tt := range tests {
@ -164,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) {
		},
		{
			subTest:  "test current bin path against hard coded template",
-			binPath:  "/usr/lib/postgresql/17/bin",
+			binPath:  "/usr/lib/postgresql/16/bin",
			template: pgBinariesLocationTemplate,
-			expected: "17",
+			expected: "16",
		},
		{
			subTest:  "test alternative bin path against a matching template",
-			binPath:  "/usr/pgsql-17/bin",
+			binPath:  "/usr/pgsql-16/bin",
			template: "/usr/pgsql-%v/bin",
-			expected: "17",
+			expected: "16",
		},
	}
@ -1451,9 +1451,9 @@ func TestNodeAffinity(t *testing.T) {
	nodeAff := &v1.NodeAffinity{
		RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
			NodeSelectorTerms: []v1.NodeSelectorTerm{
-				{
+				v1.NodeSelectorTerm{
					MatchExpressions: []v1.NodeSelectorRequirement{
-						{
+						v1.NodeSelectorRequirement{
							Key:      "test-label",
							Operator: v1.NodeSelectorOpIn,
							Values: []string{
@ -1673,7 +1673,7 @@ func TestTLS(t *testing.T) {
			TLS: &acidv1.TLSDescription{
				SecretName: tlsSecretName, CAFile: "ca.crt"},
			AdditionalVolumes: []acidv1.AdditionalVolume{
-				{
+				acidv1.AdditionalVolume{
					Name:      tlsSecretName,
					MountPath: mountPath,
					VolumeSource: v1.VolumeSource{
@ -2148,7 +2148,7 @@ func TestSidecars(t *testing.T) {
	spec = acidv1.PostgresSpec{
		PostgresqlParam: acidv1.PostgresqlParam{
-			PgVersion: "17",
+			PgVersion: "16",
			Parameters: map[string]string{
				"max_connections": "100",
			},
@ -2162,17 +2162,17 @@ func TestSidecars(t *testing.T) {
Size: "1G", Size: "1G",
}, },
Sidecars: []acidv1.Sidecar{ Sidecars: []acidv1.Sidecar{
{ acidv1.Sidecar{
Name: "cluster-specific-sidecar", Name: "cluster-specific-sidecar",
}, },
{ acidv1.Sidecar{
Name: "cluster-specific-sidecar-with-resources", Name: "cluster-specific-sidecar-with-resources",
Resources: &acidv1.Resources{ Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")}, ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")}, ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
}, },
}, },
{ acidv1.Sidecar{
Name: "replace-sidecar", Name: "replace-sidecar",
DockerImage: "override-image", DockerImage: "override-image",
}, },
@ -2200,11 +2200,11 @@ func TestSidecars(t *testing.T) {
"deprecated-global-sidecar": "image:123", "deprecated-global-sidecar": "image:123",
}, },
SidecarContainers: []v1.Container{ SidecarContainers: []v1.Container{
{ v1.Container{
Name: "global-sidecar", Name: "global-sidecar",
}, },
// will be replaced by a cluster specific sidecar with the same name // will be replaced by a cluster specific sidecar with the same name
{ v1.Container{
Name: "replace-sidecar", Name: "replace-sidecar",
Image: "replaced-image", Image: "replaced-image",
}, },
@ -2259,7 +2259,7 @@ func TestSidecars(t *testing.T) {
		},
	}

	mounts := []v1.VolumeMount{
-		{
+		v1.VolumeMount{
			Name:      "pgdata",
			MountPath: "/home/postgres/pgdata",
		},
@ -2349,35 +2349,23 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
		}
	}

-	testLabelsAndSelectors := func(isPrimary bool) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
-		return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
-			masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector
-			if podDisruptionBudget.ObjectMeta.Namespace != "myapp" {
-				return fmt.Errorf("Object Namespace incorrect.")
-			}
-			expectedLabels := map[string]string{"team": "myapp", "cluster-name": "myapp-database"}
-			if !reflect.DeepEqual(podDisruptionBudget.Labels, expectedLabels) {
-				return fmt.Errorf("Labels incorrect, got %#v, expected %#v", podDisruptionBudget.Labels, expectedLabels)
-			}
-			if !masterLabelSelectorDisabled {
-				if isPrimary {
-					expectedLabels := &metav1.LabelSelector{
-						MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}
-					if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) {
-						return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels)
-					}
-				} else {
-					expectedLabels := &metav1.LabelSelector{
-						MatchLabels: map[string]string{"cluster-name": "myapp-database", "critical-operation": "true"}}
-					if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) {
-						return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels)
-					}
-				}
-			}
-			return nil
-		}
-	}
+	testLabelsAndSelectors := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
+		masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector
+		if podDisruptionBudget.ObjectMeta.Namespace != "myapp" {
+			return fmt.Errorf("Object Namespace incorrect.")
+		}
+		if !reflect.DeepEqual(podDisruptionBudget.Labels, map[string]string{"team": "myapp", "cluster-name": "myapp-database"}) {
+			return fmt.Errorf("Labels incorrect.")
+		}
+		if !masterLabelSelectorDisabled &&
+			!reflect.DeepEqual(podDisruptionBudget.Spec.Selector, &metav1.LabelSelector{
+				MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}) {
+			return fmt.Errorf("MatchLabels incorrect.")
+		}
+		return nil
+	}

	testPodDisruptionBudgetOwnerReference := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error {
		if len(podDisruptionBudget.ObjectMeta.OwnerReferences) == 0 {
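Master's helper is now a function factory: testLabelsAndSelectors(isPrimary) returns the actual check closure, so one body can assert either the master-pod selector or the critical-operation selector. The pattern in isolation, with hypothetical names chosen for this sketch:

package main

import "fmt"

// checkFn is the shape of a single assertion over a label set.
type checkFn func(labels map[string]string) error

// expectRole returns a check parameterized at construction time,
// mirroring the factory shape of testLabelsAndSelectors(isPrimary).
func expectRole(isPrimary bool) checkFn {
	want := "critical-operation"
	if isPrimary {
		want = "spilo-role"
	}
	return func(labels map[string]string) error {
		if _, ok := labels[want]; !ok {
			return fmt.Errorf("expected label %q, got %v", want, labels)
		}
		return nil
	}
}

func main() {
	check := expectRole(true)
	fmt.Println(check(map[string]string{"spilo-role": "master"})) // <nil>
}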
@ -2412,7 +2400,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
				testPodDisruptionBudgetOwnerReference,
				hasName("postgres-myapp-database-pdb"),
				hasMinAvailable(1),
-				testLabelsAndSelectors(true),
+				testLabelsAndSelectors,
			},
		},
		{
@ -2429,7 +2417,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
				testPodDisruptionBudgetOwnerReference,
				hasName("postgres-myapp-database-pdb"),
				hasMinAvailable(0),
-				testLabelsAndSelectors(true),
+				testLabelsAndSelectors,
			},
		},
		{
@ -2446,7 +2434,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
				testPodDisruptionBudgetOwnerReference,
				hasName("postgres-myapp-database-pdb"),
				hasMinAvailable(0),
-				testLabelsAndSelectors(true),
+				testLabelsAndSelectors,
			},
		},
		{
@ -2463,7 +2451,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
				testPodDisruptionBudgetOwnerReference,
				hasName("postgres-myapp-database-databass-budget"),
				hasMinAvailable(1),
-				testLabelsAndSelectors(true),
+				testLabelsAndSelectors,
			},
		},
		{
@ -2480,7 +2468,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
				testPodDisruptionBudgetOwnerReference,
				hasName("postgres-myapp-database-pdb"),
				hasMinAvailable(1),
-				testLabelsAndSelectors(true),
+				testLabelsAndSelectors,
			},
		},
		{
@ -2497,99 +2485,13 @@ func TestGeneratePodDisruptionBudget(t *testing.T) {
				testPodDisruptionBudgetOwnerReference,
				hasName("postgres-myapp-database-pdb"),
				hasMinAvailable(1),
-				testLabelsAndSelectors(true),
+				testLabelsAndSelectors,
			},
		},
	}

	for _, tt := range tests {
-		result := tt.spec.generatePrimaryPodDisruptionBudget()
+		result := tt.spec.generatePodDisruptionBudget()
-		for _, check := range tt.check {
-			err := check(tt.spec, result)
-			if err != nil {
-				t.Errorf("%s [%s]: PodDisruptionBudget spec is incorrect, %+v",
-					testName, tt.scenario, err)
-			}
-		}
-	}
-
-	testCriticalOp := []struct {
-		scenario string
-		spec     *Cluster
-		check    []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error
-	}{
-		{
-			scenario: "With multiple instances",
-			spec: New(
-				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
-				k8sutil.KubernetesClient{},
-				acidv1.Postgresql{
-					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
-					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
-				logger,
-				eventRecorder),
-			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
-				testPodDisruptionBudgetOwnerReference,
-				hasName("postgres-myapp-database-critical-op-pdb"),
-				hasMinAvailable(3),
-				testLabelsAndSelectors(false),
-			},
-		},
-		{
-			scenario: "With zero instances",
-			spec: New(
-				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}},
-				k8sutil.KubernetesClient{},
-				acidv1.Postgresql{
-					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
-					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}},
-				logger,
-				eventRecorder),
-			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
-				testPodDisruptionBudgetOwnerReference,
-				hasName("postgres-myapp-database-critical-op-pdb"),
-				hasMinAvailable(0),
-				testLabelsAndSelectors(false),
-			},
-		},
-		{
-			scenario: "With PodDisruptionBudget disabled",
-			spec: New(
-				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}},
-				k8sutil.KubernetesClient{},
-				acidv1.Postgresql{
-					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
-					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
-				logger,
-				eventRecorder),
-			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
-				testPodDisruptionBudgetOwnerReference,
-				hasName("postgres-myapp-database-critical-op-pdb"),
-				hasMinAvailable(0),
-				testLabelsAndSelectors(false),
-			},
-		},
-		{
-			scenario: "With OwnerReference enabled",
-			spec: New(
-				Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}},
-				k8sutil.KubernetesClient{},
-				acidv1.Postgresql{
-					ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"},
-					Spec:       acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}},
-				logger,
-				eventRecorder),
-			check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{
-				testPodDisruptionBudgetOwnerReference,
-				hasName("postgres-myapp-database-critical-op-pdb"),
-				hasMinAvailable(3),
-				testLabelsAndSelectors(false),
-			},
-		},
-	}
-
-	for _, tt := range testCriticalOp {
-		result := tt.spec.generateCriticalOpPodDisruptionBudget()
		for _, check := range tt.check {
			err := check(tt.spec, result)
			if err != nil {
@ -2614,17 +2516,17 @@ func TestGenerateService(t *testing.T) {
Size: "1G", Size: "1G",
}, },
Sidecars: []acidv1.Sidecar{ Sidecars: []acidv1.Sidecar{
{ acidv1.Sidecar{
Name: "cluster-specific-sidecar", Name: "cluster-specific-sidecar",
}, },
{ acidv1.Sidecar{
Name: "cluster-specific-sidecar-with-resources", Name: "cluster-specific-sidecar-with-resources",
Resources: &acidv1.Resources{ Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")}, ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")},
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")}, ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")},
}, },
}, },
{ acidv1.Sidecar{
Name: "replace-sidecar", Name: "replace-sidecar",
DockerImage: "override-image", DockerImage: "override-image",
}, },
@ -2653,11 +2555,11 @@ func TestGenerateService(t *testing.T) {
"deprecated-global-sidecar": "image:123", "deprecated-global-sidecar": "image:123",
}, },
SidecarContainers: []v1.Container{ SidecarContainers: []v1.Container{
{ v1.Container{
Name: "global-sidecar", Name: "global-sidecar",
}, },
// will be replaced by a cluster specific sidecar with the same name // will be replaced by a cluster specific sidecar with the same name
{ v1.Container{
Name: "replace-sidecar", Name: "replace-sidecar",
Image: "replaced-image", Image: "replaced-image",
}, },
@ -2752,27 +2654,27 @@ func newLBFakeClient() (k8sutil.KubernetesClient, *fake.Clientset) {
func getServices(serviceType v1.ServiceType, sourceRanges []string, extTrafficPolicy, clusterName string) []v1.ServiceSpec {
	return []v1.ServiceSpec{
-		{
+		v1.ServiceSpec{
			ExternalTrafficPolicy:    v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
			LoadBalancerSourceRanges: sourceRanges,
			Ports:                    []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
			Type:                     serviceType,
		},
-		{
+		v1.ServiceSpec{
			ExternalTrafficPolicy:    v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
			LoadBalancerSourceRanges: sourceRanges,
			Ports:                    []v1.ServicePort{{Name: clusterName + "-pooler", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
			Selector:                 map[string]string{"connection-pooler": clusterName + "-pooler"},
			Type:                     serviceType,
		},
-		{
+		v1.ServiceSpec{
			ExternalTrafficPolicy:    v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
			LoadBalancerSourceRanges: sourceRanges,
			Ports:                    []v1.ServicePort{{Name: "postgresql", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
			Selector:                 map[string]string{"spilo-role": "replica", "application": "spilo", "cluster-name": clusterName},
			Type:                     serviceType,
		},
-		{
+		v1.ServiceSpec{
			ExternalTrafficPolicy:    v1.ServiceExternalTrafficPolicyType(extTrafficPolicy),
			LoadBalancerSourceRanges: sourceRanges,
			Ports:                    []v1.ServicePort{{Name: clusterName + "-pooler-repl", Port: 5432, TargetPort: intstr.IntOrString{IntVal: 5432}}},
@ -2992,7 +2894,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
}, },
Spec: acidv1.PostgresSpec{ Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{ Sidecars: []acidv1.Sidecar{
{ acidv1.Sidecar{
Name: sidecarName, Name: sidecarName,
}, },
}, },
@ -3091,44 +2993,6 @@ func TestGenerateResourceRequirements(t *testing.T) {
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")},
}, },
}, },
{
subTest: "test generation of resources when min limits are all set to zero",
config: config.Config{
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: clusterNameLabel,
DefaultCPURequest: "0",
DefaultCPULimit: "0",
MaxCPURequest: "0",
MinCPULimit: "0",
DefaultMemoryRequest: "0",
DefaultMemoryLimit: "0",
MaxMemoryRequest: "0",
MinMemoryLimit: "0",
PodRoleLabel: "spilo-role",
},
PodManagementPolicy: "ordered_ready",
SetMemoryRequestToLimit: false,
},
pgSpec: acidv1.Postgresql{
ObjectMeta: metav1.ObjectMeta{
Name: clusterName,
Namespace: namespace,
},
Spec: acidv1.PostgresSpec{
Resources: &acidv1.Resources{
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("5m"), Memory: k8sutil.StringToPointer("5Mi")},
},
TeamID: "acid",
Volume: acidv1.Volume{
Size: "1G",
},
},
},
expectedResources: acidv1.Resources{
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("5m"), Memory: k8sutil.StringToPointer("5Mi")},
},
},
{ {
subTest: "test matchLimitsWithRequestsIfSmaller", subTest: "test matchLimitsWithRequestsIfSmaller",
config: config.Config{ config: config.Config{
@ -3231,7 +3095,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
}, },
Spec: acidv1.PostgresSpec{ Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{ Sidecars: []acidv1.Sidecar{
{ acidv1.Sidecar{
Name: sidecarName, Name: sidecarName,
Resources: &acidv1.Resources{ Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},
@ -3320,7 +3184,7 @@ func TestGenerateResourceRequirements(t *testing.T) {
}, },
Spec: acidv1.PostgresSpec{ Spec: acidv1.PostgresSpec{
Sidecars: []acidv1.Sidecar{ Sidecars: []acidv1.Sidecar{
{ acidv1.Sidecar{
Name: sidecarName, Name: sidecarName,
Resources: &acidv1.Resources{ Resources: &acidv1.Resources{
ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")},

View File

@ -1,33 +1,23 @@
package cluster package cluster
import ( import (
"context"
"encoding/json"
"fmt" "fmt"
"strings" "strings"
"github.com/Masterminds/semver"
"github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/spec"
"github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
) )
// VersionMap Map of version numbers // VersionMap Map of version numbers
var VersionMap = map[string]int{ var VersionMap = map[string]int{
"12": 120000,
"13": 130000, "13": 130000,
"14": 140000, "14": 140000,
"15": 150000, "15": 150000,
"16": 160000, "16": 160000,
"17": 170000,
} }
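
The integer values follow PostgreSQL's server_version_num scheme (major version times 10000), which appears to be the same unit Patroni reports in a member's ServerVersion, so the version checks below reduce to plain integer comparisons. A trivial fragment referencing the map above:

// Illustration: a 13.4 primary reports server_version_num 130004, while a
// desired "17" resolves through VersionMap to 170000.
current := 130004
desired := VersionMap["17"]
needsUpgrade := current < desired // true
_ = needsUpgrade
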
const (
majorVersionUpgradeSuccessAnnotation = "last-major-upgrade-success"
majorVersionUpgradeFailureAnnotation = "last-major-upgrade-failure"
)
// IsBiggerPostgresVersion Compare two Postgres version numbers // IsBiggerPostgresVersion Compare two Postgres version numbers
func IsBiggerPostgresVersion(old string, new string) bool { func IsBiggerPostgresVersion(old string, new string) bool {
oldN := VersionMap[old] oldN := VersionMap[old]
@ -44,7 +34,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int {
func (c *Cluster) GetDesiredMajorVersion() string { func (c *Cluster) GetDesiredMajorVersion() string {
if c.Config.OpConfig.MajorVersionUpgradeMode == "full" { if c.Config.OpConfig.MajorVersionUpgradeMode == "full" {
// e.g. current is 13, minimal is 13 allowing 13 to 17 clusters, everything below is upgraded // e.g. current is 12, minimal is 12 allowing 12 to 16 clusters, everything below is upgraded
if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) { if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) {
c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion) c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion)
return c.Config.OpConfig.TargetMajorVersion return c.Config.OpConfig.TargetMajorVersion
@ -64,63 +54,6 @@ func (c *Cluster) isUpgradeAllowedForTeam(owningTeam string) bool {
return util.SliceContains(allowedTeams, owningTeam) return util.SliceContains(allowedTeams, owningTeam)
} }
func (c *Cluster) annotatePostgresResource(isSuccess bool) error {
annotations := make(map[string]string)
currentTime := metav1.Now().Format("2006-01-02T15:04:05Z")
if isSuccess {
annotations[majorVersionUpgradeSuccessAnnotation] = currentTime
} else {
annotations[majorVersionUpgradeFailureAnnotation] = currentTime
}
patchData, err := metaAnnotationsPatch(annotations)
if err != nil {
c.logger.Errorf("could not form patch for %s postgresql resource: %v", c.Name, err)
return err
}
_, err = c.KubeClient.Postgresqls(c.Namespace).Patch(context.Background(), c.Name, types.MergePatchType, patchData, metav1.PatchOptions{})
if err != nil {
c.logger.Errorf("failed to patch annotations to postgresql resource: %v", err)
return err
}
return nil
}
func (c *Cluster) removeFailuresAnnotation() error {
annotationToRemove := []map[string]string{
{
"op": "remove",
"path": fmt.Sprintf("/metadata/annotations/%s", majorVersionUpgradeFailureAnnotation),
},
}
removePatch, err := json.Marshal(annotationToRemove)
if err != nil {
c.logger.Errorf("could not form removal patch for %s postgresql resource: %v", c.Name, err)
return err
}
_, err = c.KubeClient.Postgresqls(c.Namespace).Patch(context.Background(), c.Name, types.JSONPatchType, removePatch, metav1.PatchOptions{})
if err != nil {
c.logger.Errorf("failed to remove annotations from postgresql resource: %v", err)
return err
}
return nil
}
func (c *Cluster) criticalOperationLabel(pods []v1.Pod, value *string) error {
metadataReq := map[string]map[string]map[string]*string{"metadata": {"labels": {"critical-operation": value}}}
patchReq, err := json.Marshal(metadataReq)
if err != nil {
return fmt.Errorf("could not marshal ObjectMeta: %v", err)
}
for _, pod := range pods {
_, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patchReq, metav1.PatchOptions{})
if err != nil {
return err
}
}
return nil
}
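
For illustration, the helpers above emit two different patch flavors: annotations land on the postgresql resource as a JSON merge patch, while criticalOperationLabel sends a strategic merge patch per pod, where a marshaled nil pointer becomes null and clears the label again. Example payloads (the timestamp is illustrative):

// What annotatePostgresResource(true) sends (merge patch):
successPatch := []byte(`{"metadata":{"annotations":{"last-major-upgrade-success":"2024-08-01T12:00:00Z"}}}`)

// What criticalOperationLabel(pods, &val) sends with val = "true":
markPatch := []byte(`{"metadata":{"labels":{"critical-operation":"true"}}}`)

// And with a nil value pointer, json.Marshal yields null, which a strategic
// merge patch interprets as "remove the label":
unmarkPatch := []byte(`{"metadata":{"labels":{"critical-operation":null}}}`)
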
/* /*
Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off"). Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off").
@ -136,151 +69,74 @@ func (c *Cluster) majorVersionUpgrade() error {
desiredVersion := c.GetDesiredMajorVersionAsInt() desiredVersion := c.GetDesiredMajorVersionAsInt()
if c.currentMajorVersion >= desiredVersion { if c.currentMajorVersion >= desiredVersion {
if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { // if failure annotation exists, remove it
c.removeFailuresAnnotation()
c.logger.Infof("removing failure annotation as the cluster is already up to date")
}
c.logger.Infof("cluster version up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion) c.logger.Infof("cluster version up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
return nil return nil
} }
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
c.logger.Infof("skipping major version upgrade, not in maintenance window")
return nil
}
pods, err := c.listPods() pods, err := c.listPods()
if err != nil { if err != nil {
return err return err
} }
allRunning := true allRunning := true
isStandbyCluster := false
var masterPod *v1.Pod var masterPod *v1.Pod
for i, pod := range pods { for i, pod := range pods {
ps, _ := c.patroni.GetMemberData(&pod) ps, _ := c.patroni.GetMemberData(&pod)
if ps.Role == "standby_leader" {
isStandbyCluster = true
c.currentMajorVersion = ps.ServerVersion
break
}
if ps.State != "running" { if ps.State != "running" {
allRunning = false allRunning = false
c.logger.Infof("identified non running pod, potentially skipping major version upgrade") c.logger.Infof("identified non running pod, potentially skipping major version upgrade")
} }
if ps.Role == "master" || ps.Role == "primary" { if ps.Role == "master" {
masterPod = &pods[i] masterPod = &pods[i]
c.currentMajorVersion = ps.ServerVersion c.currentMajorVersion = ps.ServerVersion
} }
} }
if masterPod == nil {
c.logger.Infof("no master in the cluster, skipping major version upgrade")
return nil
}
// Recheck version with newest data from Patroni // Recheck version with newest data from Patroni
if c.currentMajorVersion >= desiredVersion { if c.currentMajorVersion >= desiredVersion {
if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { // if failure annotation exists, remove it
c.removeFailuresAnnotation()
c.logger.Infof("removing failure annotation as the cluster is already up to date")
}
c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion) c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion)
return nil return nil
} else if isStandbyCluster {
c.logger.Warnf("skipping major version upgrade for %s/%s standby cluster. Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name)
return nil
} }
if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists {
c.logger.Infof("last major upgrade failed, skipping upgrade")
return nil
}
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
c.logger.Infof("skipping major version upgrade, not in maintenance window")
return nil
}
members, err := c.patroni.GetClusterMembers(masterPod)
if err != nil {
c.logger.Error("could not get cluster members data from Patroni API, skipping major version upgrade")
return err
}
patroniData, err := c.patroni.GetMemberData(masterPod)
if err != nil {
c.logger.Error("could not get members data from Patroni API, skipping major version upgrade")
return err
}
patroniVer, err := semver.NewVersion(patroniData.Patroni.Version)
if err != nil {
c.logger.Error("error parsing Patroni version")
patroniVer, _ = semver.NewVersion("3.0.4")
}
verConstraint, _ := semver.NewConstraint(">= 3.0.4")
checkStreaming, _ := verConstraint.Validate(patroniVer)
for _, member := range members {
if PostgresRole(member.Role) == Leader {
continue
}
if checkStreaming && member.State != "streaming" {
c.logger.Infof("skipping major version upgrade, replica %s is not streaming from primary", member.Name)
return nil
}
if member.Lag > 16*1024*1024 {
c.logger.Infof("skipping major version upgrade, replication lag on member %s is too high", member.Name)
return nil
}
}
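
The loop above is the safety gate for the upgrade: every non-leader member must be streaming (only enforced when Patroni is at least 3.0.4, presumably because older versions do not reliably report that state) and must be within 16 MiB of replication lag. Restated as a standalone predicate under those assumptions:

// Standalone restatement of the gate above.
const maxLagBytes = 16 * 1024 * 1024 // 16 MiB, as in the loop above

type memberStatus struct {
	Name, Role, State string
	Lag               uint64
}

func replicasReadyForUpgrade(members []memberStatus, checkStreaming bool) bool {
	for _, m := range members {
		if m.Role == "leader" {
			continue
		}
		if checkStreaming && m.State != "streaming" {
			return false // replica not streaming from the primary
		}
		if m.Lag > maxLagBytes {
			return false // replication lag too high
		}
	}
	return true
}
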
isUpgradeSuccess := true
numberOfPods := len(pods) numberOfPods := len(pods)
if allRunning { if allRunning && masterPod != nil {
c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion) c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion)
if c.currentMajorVersion < desiredVersion { if c.currentMajorVersion < desiredVersion {
defer func() error {
if err = c.criticalOperationLabel(pods, nil); err != nil {
return fmt.Errorf("failed to remove critical-operation label: %s", err)
}
return nil
}()
val := "true"
if err = c.criticalOperationLabel(pods, &val); err != nil {
return fmt.Errorf("failed to assign critical-operation label: %s", err)
}
podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name} podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name}
c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods)
upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods) upgradeCommand := fmt.Sprintf("set -o pipefail && /usr/bin/python3 /scripts/inplace_upgrade.py %d 2>&1 | tee last_upgrade.log", numberOfPods)
c.logger.Debug("checking if the spilo image runs with root or non-root (check for user id=0)") c.logger.Debugf("checking if the spilo image runs with root or non-root (check for user id=0)")
resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u") resultIdCheck, errIdCheck := c.ExecCommand(podName, "/bin/bash", "-c", "/usr/bin/id -u")
if errIdCheck != nil { if errIdCheck != nil {
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "checking user id to run upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, errIdCheck)
} }
resultIdCheck = strings.TrimSuffix(resultIdCheck, "\n") resultIdCheck = strings.TrimSuffix(resultIdCheck, "\n")
var result, scriptErrMsg string var result string
if resultIdCheck != "0" { if resultIdCheck != "0" {
c.logger.Infof("user id was identified as: %s, hence default user is non-root already", resultIdCheck) c.logger.Infof("user id was identified as: %s, hence default user is non-root already", resultIdCheck)
result, err = c.ExecCommand(podName, "/bin/bash", "-c", upgradeCommand) result, err = c.ExecCommand(podName, "/bin/bash", "-c", upgradeCommand)
scriptErrMsg, _ = c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log")
} else { } else {
c.logger.Infof("user id was identified as: %s, using su to reach the postgres user", resultIdCheck) c.logger.Infof("user id was identified as: %s, using su to reach the postgres user", resultIdCheck)
result, err = c.ExecCommand(podName, "/bin/su", "postgres", "-c", upgradeCommand) result, err = c.ExecCommand(podName, "/bin/su", "postgres", "-c", upgradeCommand)
scriptErrMsg, _ = c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log")
} }
if err != nil { if err != nil {
isUpgradeSuccess = false c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, err)
c.annotatePostgresResource(isUpgradeSuccess) return err
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, scriptErrMsg)
return fmt.Errorf("%s", scriptErrMsg)
} }
c.annotatePostgresResource(isUpgradeSuccess)
c.logger.Infof("upgrade action triggered and command completed: %s", result[:100]) c.logger.Infof("upgrade action triggered and command completed: %s", result[:100])
c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "upgrade from %d to %d finished", c.currentMajorVersion, desiredVersion)
} }
} }
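
Pulling the pieces together: before exec'ing the upgrade the pods are labeled critical-operation=true (with a deferred call clearing the label again), on failure the last-major-upgrade-failure annotation is written and the tail of last_upgrade.log becomes the returned error, and on success last-major-upgrade-success is recorded. A compressed view of the function above, not a drop-in replacement:

// Compressed control flow of majorVersionUpgrade above.
val := "true"
if err := c.criticalOperationLabel(pods, &val); err != nil {
	return fmt.Errorf("failed to assign critical-operation label: %s", err)
}
defer c.criticalOperationLabel(pods, nil) // clear the label afterwards

result, err := c.ExecCommand(podName, "/bin/bash", "-c", upgradeCommand)
if err != nil {
	c.annotatePostgresResource(false) // last-major-upgrade-failure
	scriptErrMsg, _ := c.ExecCommand(podName, "/bin/bash", "-c", "tail -n 1 last_upgrade.log")
	return fmt.Errorf("%s", scriptErrMsg)
}
c.annotatePostgresResource(true) // last-major-upgrade-success
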

View File

@ -3,11 +3,12 @@ package cluster
import ( import (
"context" "context"
"fmt" "fmt"
"slices"
"sort" "sort"
"strconv" "strconv"
"time" "time"
"golang.org/x/exp/slices"
appsv1 "k8s.io/api/apps/v1" appsv1 "k8s.io/api/apps/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
@ -58,7 +59,7 @@ func (c *Cluster) markRollingUpdateFlagForPod(pod *v1.Pod, msg string) error {
return nil return nil
} }
c.logger.Infof("mark rolling update annotation for %s: reason %s", pod.Name, msg) c.logger.Debugf("mark rolling update annotation for %s: reason %s", pod.Name, msg)
flag := make(map[string]string) flag := make(map[string]string)
flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true) flag[rollingUpdatePodAnnotationKey] = strconv.FormatBool(true)
@ -109,7 +110,7 @@ func (c *Cluster) getRollingUpdateFlagFromPod(pod *v1.Pod) (flag bool) {
} }
func (c *Cluster) deletePods() error { func (c *Cluster) deletePods() error {
c.logger.Debug("deleting pods") c.logger.Debugln("deleting pods")
pods, err := c.listPods() pods, err := c.listPods()
if err != nil { if err != nil {
return err return err
@ -126,9 +127,9 @@ func (c *Cluster) deletePods() error {
} }
} }
if len(pods) > 0 { if len(pods) > 0 {
c.logger.Debug("pods have been deleted") c.logger.Debugln("pods have been deleted")
} else { } else {
c.logger.Debug("no pods to delete") c.logger.Debugln("no pods to delete")
} }
return nil return nil
@ -229,7 +230,7 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err) return fmt.Errorf("could not get node %q: %v", oldMaster.Spec.NodeName, err)
} }
if !eol { if !eol {
c.logger.Debug("no action needed: master pod is already on a live node") c.logger.Debugf("no action needed: master pod is already on a live node")
return nil return nil
} }
@ -279,16 +280,11 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error {
return fmt.Errorf("could not move pod: %v", err) return fmt.Errorf("could not move pod: %v", err)
} }
scheduleSwitchover := false
if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) {
c.logger.Infof("postponing switchover, not in maintenance window")
scheduleSwitchover = true
}
err = retryutil.Retry(1*time.Minute, 5*time.Minute, err = retryutil.Retry(1*time.Minute, 5*time.Minute,
func() (bool, error) { func() (bool, error) {
err := c.Switchover(oldMaster, masterCandidateName, scheduleSwitchover) err := c.Switchover(oldMaster, masterCandidateName)
if err != nil { if err != nil {
c.logger.Errorf("could not switchover to pod %q: %v", masterCandidateName, err) c.logger.Errorf("could not failover to pod %q: %v", masterCandidateName, err)
return false, nil return false, nil
} }
return true, nil return true, nil
@ -432,10 +428,9 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
} }
newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel]) newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel])
switch newRole { if newRole == Replica {
case Replica:
replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta)) replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta))
case Master: } else if newRole == Master {
newMasterPod = newPod newMasterPod = newPod
} }
} }
@ -450,7 +445,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp
// do not recreate master now so it will keep the update flag and switchover will be retried on next sync // do not recreate master now so it will keep the update flag and switchover will be retried on next sync
return fmt.Errorf("skipping switchover: %v", err) return fmt.Errorf("skipping switchover: %v", err)
} }
if err := c.Switchover(masterPod, masterCandidate, false); err != nil { if err := c.Switchover(masterPod, masterCandidate); err != nil {
return fmt.Errorf("could not perform switch over: %v", err) return fmt.Errorf("could not perform switch over: %v", err)
} }
} else if newMasterPod == nil && len(replicas) == 0 { } else if newMasterPod == nil && len(replicas) == 0 {
@ -485,9 +480,6 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
if PostgresRole(member.Role) == SyncStandby { if PostgresRole(member.Role) == SyncStandby {
syncCandidates = append(syncCandidates, member) syncCandidates = append(syncCandidates, member)
} }
if PostgresRole(member.Role) != Leader && PostgresRole(member.Role) != StandbyLeader && slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) {
candidates = append(candidates, member)
}
} }
// if synchronous mode is enabled and no SyncStandy was found // if synchronous mode is enabled and no SyncStandy was found
@ -497,12 +489,6 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
return false, nil return false, nil
} }
// retry also in asynchronous mode when no replica candidate was found
if !c.Spec.Patroni.SynchronousMode && len(candidates) == 0 {
c.logger.Warnf("no replica candidate found - retrying fetching cluster members")
return false, nil
}
return true, nil return true, nil
}, },
) )
@ -516,13 +502,25 @@ func (c *Cluster) getSwitchoverCandidate(master *v1.Pod) (spec.NamespacedName, e
return syncCandidates[i].Lag < syncCandidates[j].Lag return syncCandidates[i].Lag < syncCandidates[j].Lag
}) })
return spec.NamespacedName{Namespace: master.Namespace, Name: syncCandidates[0].Name}, nil return spec.NamespacedName{Namespace: master.Namespace, Name: syncCandidates[0].Name}, nil
} else {
// in asynchronous mode find running replicas
for _, member := range members {
if PostgresRole(member.Role) == Leader || PostgresRole(member.Role) == StandbyLeader {
continue
} }
if slices.Contains([]string{"running", "streaming", "in archive recovery"}, member.State) {
candidates = append(candidates, member)
}
}
if len(candidates) > 0 { if len(candidates) > 0 {
sort.Slice(candidates, func(i, j int) bool { sort.Slice(candidates, func(i, j int) bool {
return candidates[i].Lag < candidates[j].Lag return candidates[i].Lag < candidates[j].Lag
}) })
return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil return spec.NamespacedName{Namespace: master.Namespace, Name: candidates[0].Name}, nil
} }
}
return spec.NamespacedName{}, fmt.Errorf("no switchover candidate found") return spec.NamespacedName{}, fmt.Errorf("no switchover candidate found")
} }
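
Summarizing the selection logic above: leaders and standby leaders are never candidates; with synchronous mode enabled only sync_standby members qualify, otherwise any replica in a healthy state ("running", "streaming", "in archive recovery"); within the chosen pool the member with the lowest reported lag wins. A compact restatement under those assumptions, using a stand-in member type rather than the real Patroni API structs:

// Compact restatement of the switchover candidate rules above.
type clusterMember struct {
	Name, Role, State string
	Lag               uint64
}

func pickSwitchoverCandidate(members []clusterMember, syncMode bool) (string, bool) {
	healthy := map[string]bool{"running": true, "streaming": true, "in archive recovery": true}
	pool := []clusterMember{}
	for _, m := range members {
		if m.Role == "leader" || m.Role == "standby_leader" {
			continue
		}
		if syncMode && m.Role != "sync_standby" {
			continue
		}
		if !syncMode && !healthy[m.State] {
			continue
		}
		pool = append(pool, m)
	}
	if len(pool) == 0 {
		return "", false
	}
	sort.Slice(pool, func(i, j int) bool { return pool[i].Lag < pool[j].Lag })
	return pool[0].Name, true
}
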

View File

@ -62,7 +62,7 @@ func TestGetSwitchoverCandidate(t *testing.T) {
expectedError: nil, expectedError: nil,
}, },
{ {
subtest: "choose first replica when lag is equal everywhere", subtest: "choose first replica when lag is equal evrywhere",
clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`, clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 1}, {"name": "acid-test-cluster-1", "role": "replica", "state": "streaming", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 1, "lag": 5}, {"name": "acid-test-cluster-2", "role": "replica", "state": "running", "api_url": "http://192.168.100.3:8008/patroni", "host": "192.168.100.3", "port": 5432, "timeline": 1, "lag": 5}]}`,
syncModeEnabled: false, syncModeEnabled: false,
expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"}, expectedCandidate: spec.NamespacedName{Namespace: namespace, Name: "acid-test-cluster-1"},
@ -73,7 +73,7 @@ func TestGetSwitchoverCandidate(t *testing.T) {
clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 2}, {"name": "acid-test-cluster-1", "role": "replica", "state": "starting", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 2}]}`, clusterJson: `{"members": [{"name": "acid-test-cluster-0", "role": "leader", "state": "running", "api_url": "http://192.168.100.1:8008/patroni", "host": "192.168.100.1", "port": 5432, "timeline": 2}, {"name": "acid-test-cluster-1", "role": "replica", "state": "starting", "api_url": "http://192.168.100.2:8008/patroni", "host": "192.168.100.2", "port": 5432, "timeline": 2}]}`,
syncModeEnabled: false, syncModeEnabled: false,
expectedCandidate: spec.NamespacedName{}, expectedCandidate: spec.NamespacedName{},
expectedError: fmt.Errorf("failed to get Patroni cluster members: unexpected end of JSON input"), expectedError: fmt.Errorf("no switchover candidate found"),
}, },
{ {
subtest: "replicas with different status", subtest: "replicas with different status",

View File

@ -23,13 +23,8 @@ const (
) )
func (c *Cluster) listResources() error { func (c *Cluster) listResources() error {
if c.PrimaryPodDisruptionBudget != nil { if c.PodDisruptionBudget != nil {
c.logger.Infof("found primary pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta), c.PrimaryPodDisruptionBudget.UID) c.logger.Infof("found pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta), c.PodDisruptionBudget.UID)
}
if c.CriticalOpPodDisruptionBudget != nil {
c.logger.Infof("found pod disruption budget for critical operations: %q (uid: %q)", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta), c.CriticalOpPodDisruptionBudget.UID)
} }
if c.Statefulset != nil { if c.Statefulset != nil {
@ -44,8 +39,8 @@ func (c *Cluster) listResources() error {
c.logger.Infof("found logical backup job: %q (uid: %q)", util.NameFromMeta(c.LogicalBackupJob.ObjectMeta), c.LogicalBackupJob.UID) c.logger.Infof("found logical backup job: %q (uid: %q)", util.NameFromMeta(c.LogicalBackupJob.ObjectMeta), c.LogicalBackupJob.UID)
} }
for uid, secret := range c.Secrets { for _, secret := range c.Secrets {
c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), uid, secret.ObjectMeta.Namespace) c.logger.Infof("found secret: %q (uid: %q) namespace: %s", util.NameFromMeta(secret.ObjectMeta), secret.UID, secret.ObjectMeta.Namespace)
} }
for role, service := range c.Services { for role, service := range c.Services {
@ -75,8 +70,13 @@ func (c *Cluster) listResources() error {
c.logger.Infof("found pod: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID) c.logger.Infof("found pod: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
} }
for uid, pvc := range c.VolumeClaims { pvcs, err := c.listPersistentVolumeClaims()
c.logger.Infof("found persistent volume claim: %q (uid: %q)", util.NameFromMeta(pvc.ObjectMeta), uid) if err != nil {
return fmt.Errorf("could not get the list of PVCs: %v", err)
}
for _, obj := range pvcs {
c.logger.Infof("found PVC: %q (uid: %q)", util.NameFromMeta(obj.ObjectMeta), obj.UID)
} }
for role, poolerObjs := range c.ConnectionPooler { for role, poolerObjs := range c.ConnectionPooler {
@ -94,12 +94,12 @@ func (c *Cluster) listResources() error {
func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) {
c.setProcessName("creating statefulset") c.setProcessName("creating statefulset")
// check if it's allowed that spec contains initContainers // check if it's allowed that spec contains initContainers
if len(c.Spec.InitContainers) > 0 && if c.Spec.InitContainers != nil && len(c.Spec.InitContainers) > 0 &&
c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) { c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) {
return nil, fmt.Errorf("initContainers specified but disabled in configuration") return nil, fmt.Errorf("initContainers specified but disabled in configuration")
} }
// check if it's allowed that spec contains sidecars // check if it's allowed that spec contains sidecars
if len(c.Spec.Sidecars) > 0 && if c.Spec.Sidecars != nil && len(c.Spec.Sidecars) > 0 &&
c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) { c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) {
return nil, fmt.Errorf("sidecar containers specified but disabled in configuration") return nil, fmt.Errorf("sidecar containers specified but disabled in configuration")
} }
@ -167,8 +167,8 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error {
return fmt.Errorf("pod %q does not belong to cluster", podName) return fmt.Errorf("pod %q does not belong to cluster", podName)
} }
if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name, ""); err != nil { if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name); err != nil {
return fmt.Errorf("could not switchover: %v", err) return fmt.Errorf("could not failover: %v", err)
} }
return nil return nil
@ -187,7 +187,7 @@ func (c *Cluster) updateStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
c.logger.Warningf("could not scale down: %v", err) c.logger.Warningf("could not scale down: %v", err)
} }
} }
c.logger.Debug("updating statefulset") c.logger.Debugf("updating statefulset")
patchData, err := specPatch(newStatefulSet.Spec) patchData, err := specPatch(newStatefulSet.Spec)
if err != nil { if err != nil {
@ -218,7 +218,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
} }
statefulSetName := util.NameFromMeta(c.Statefulset.ObjectMeta) statefulSetName := util.NameFromMeta(c.Statefulset.ObjectMeta)
c.logger.Debug("replacing statefulset") c.logger.Debugf("replacing statefulset")
// Delete the current statefulset without deleting the pods // Delete the current statefulset without deleting the pods
deletePropagationPolicy := metav1.DeletePropagationOrphan deletePropagationPolicy := metav1.DeletePropagationOrphan
@ -232,7 +232,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
// make sure we clear the stored statefulset status if the subsequent create fails. // make sure we clear the stored statefulset status if the subsequent create fails.
c.Statefulset = nil c.Statefulset = nil
// wait until the statefulset is truly deleted // wait until the statefulset is truly deleted
c.logger.Debug("waiting for the statefulset to be deleted") c.logger.Debugf("waiting for the statefulset to be deleted")
err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
func() (bool, error) { func() (bool, error) {
@ -266,7 +266,7 @@ func (c *Cluster) replaceStatefulSet(newStatefulSet *appsv1.StatefulSet) error {
func (c *Cluster) deleteStatefulSet() error { func (c *Cluster) deleteStatefulSet() error {
c.setProcessName("deleting statefulset") c.setProcessName("deleting statefulset")
c.logger.Debug("deleting statefulset") c.logger.Debugln("deleting statefulset")
if c.Statefulset == nil { if c.Statefulset == nil {
c.logger.Debug("there is no statefulset in the cluster") c.logger.Debug("there is no statefulset in the cluster")
return nil return nil
@ -288,10 +288,10 @@ func (c *Cluster) deleteStatefulSet() error {
if c.OpConfig.EnablePersistentVolumeClaimDeletion != nil && *c.OpConfig.EnablePersistentVolumeClaimDeletion { if c.OpConfig.EnablePersistentVolumeClaimDeletion != nil && *c.OpConfig.EnablePersistentVolumeClaimDeletion {
if err := c.deletePersistentVolumeClaims(); err != nil { if err := c.deletePersistentVolumeClaims(); err != nil {
return fmt.Errorf("could not delete persistent volume claims: %v", err) return fmt.Errorf("could not delete PersistentVolumeClaims: %v", err)
} }
} else { } else {
c.logger.Info("not deleting persistent volume claims because disabled in configuration") c.logger.Info("not deleting PersistentVolumeClaims because disabled in configuration")
} }
return nil return nil
@ -334,7 +334,7 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
} }
} }
if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations, nil); changed { if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations); changed {
patchData, err := metaAnnotationsPatch(newService.Annotations) patchData, err := metaAnnotationsPatch(newService.Annotations)
if err != nil { if err != nil {
return nil, fmt.Errorf("could not form patch for service %q annotations: %v", oldService.Name, err) return nil, fmt.Errorf("could not form patch for service %q annotations: %v", oldService.Name, err)
@ -349,8 +349,7 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe
} }
func (c *Cluster) deleteService(role PostgresRole) error { func (c *Cluster) deleteService(role PostgresRole) error {
c.setProcessName("deleting service") c.logger.Debugf("deleting service %s", role)
c.logger.Debugf("deleting %s service", role)
if c.Services[role] == nil { if c.Services[role] == nil {
c.logger.Debugf("No service for %s role was found, nothing to delete", role) c.logger.Debugf("No service for %s role was found, nothing to delete", role)
@ -422,128 +421,59 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset
return result return result
} }
func (c *Cluster) createPrimaryPodDisruptionBudget() error { func (c *Cluster) createPodDisruptionBudget() (*policyv1.PodDisruptionBudget, error) {
c.logger.Debug("creating primary pod disruption budget") podDisruptionBudgetSpec := c.generatePodDisruptionBudget()
if c.PrimaryPodDisruptionBudget != nil {
c.logger.Warning("primary pod disruption budget already exists in the cluster")
return nil
}
podDisruptionBudgetSpec := c.generatePrimaryPodDisruptionBudget()
podDisruptionBudget, err := c.KubeClient. podDisruptionBudget, err := c.KubeClient.
PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace). PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace).
Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{}) Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{})
if err != nil { if err != nil {
return err return nil, err
} }
c.logger.Infof("primary pod disruption budget %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta)) c.PodDisruptionBudget = podDisruptionBudget
c.PrimaryPodDisruptionBudget = podDisruptionBudget
return nil return podDisruptionBudget, nil
} }
func (c *Cluster) createCriticalOpPodDisruptionBudget() error { func (c *Cluster) updatePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error {
c.logger.Debug("creating pod disruption budget for critical operations") if c.PodDisruptionBudget == nil {
if c.CriticalOpPodDisruptionBudget != nil { return fmt.Errorf("there is no pod disruption budget in the cluster")
c.logger.Warning("pod disruption budget for critical operations already exists in the cluster")
return nil
} }
podDisruptionBudgetSpec := c.generateCriticalOpPodDisruptionBudget() if err := c.deletePodDisruptionBudget(); err != nil {
podDisruptionBudget, err := c.KubeClient. return fmt.Errorf("could not delete pod disruption budget: %v", err)
PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace).
Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{})
if err != nil {
return err
}
c.logger.Infof("pod disruption budget for critical operations %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta))
c.CriticalOpPodDisruptionBudget = podDisruptionBudget
return nil
}
func (c *Cluster) createPodDisruptionBudgets() error {
errors := make([]string, 0)
err := c.createPrimaryPodDisruptionBudget()
if err != nil {
errors = append(errors, fmt.Sprintf("could not create primary pod disruption budget: %v", err))
}
err = c.createCriticalOpPodDisruptionBudget()
if err != nil {
errors = append(errors, fmt.Sprintf("could not create pod disruption budget for critical operations: %v", err))
}
if len(errors) > 0 {
return fmt.Errorf("%v", strings.Join(errors, `', '`))
}
return nil
}
func (c *Cluster) updatePrimaryPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error {
c.logger.Debug("updating primary pod disruption budget")
if c.PrimaryPodDisruptionBudget == nil {
return fmt.Errorf("there is no primary pod disruption budget in the cluster")
}
if err := c.deletePrimaryPodDisruptionBudget(); err != nil {
return fmt.Errorf("could not delete primary pod disruption budget: %v", err)
} }
newPdb, err := c.KubeClient. newPdb, err := c.KubeClient.
PodDisruptionBudgets(pdb.Namespace). PodDisruptionBudgets(pdb.Namespace).
Create(context.TODO(), pdb, metav1.CreateOptions{}) Create(context.TODO(), pdb, metav1.CreateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("could not create primary pod disruption budget: %v", err) return fmt.Errorf("could not create pod disruption budget: %v", err)
} }
c.PrimaryPodDisruptionBudget = newPdb c.PodDisruptionBudget = newPdb
return nil return nil
} }
func (c *Cluster) updateCriticalOpPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { func (c *Cluster) deletePodDisruptionBudget() error {
c.logger.Debug("updating pod disruption budget for critical operations") c.logger.Debug("deleting pod disruption budget")
if c.CriticalOpPodDisruptionBudget == nil { if c.PodDisruptionBudget == nil {
return fmt.Errorf("there is no pod disruption budget for critical operations in the cluster") c.logger.Debug("there is no pod disruption budget in the cluster")
}
if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil {
return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
}
newPdb, err := c.KubeClient.
PodDisruptionBudgets(pdb.Namespace).
Create(context.TODO(), pdb, metav1.CreateOptions{})
if err != nil {
return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err)
}
c.CriticalOpPodDisruptionBudget = newPdb
return nil return nil
} }
func (c *Cluster) deletePrimaryPodDisruptionBudget() error { pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)
c.logger.Debug("deleting primary pod disruption budget")
if c.PrimaryPodDisruptionBudget == nil {
c.logger.Debug("there is no primary pod disruption budget in the cluster")
return nil
}
pdbName := util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)
err := c.KubeClient. err := c.KubeClient.
PodDisruptionBudgets(c.PrimaryPodDisruptionBudget.Namespace). PodDisruptionBudgets(c.PodDisruptionBudget.Namespace).
Delete(context.TODO(), c.PrimaryPodDisruptionBudget.Name, c.deleteOptions) Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions)
if k8sutil.ResourceNotFound(err) { if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)) c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta))
} else if err != nil { } else if err != nil {
return fmt.Errorf("could not delete primary pod disruption budget: %v", err) return fmt.Errorf("could not delete PodDisruptionBudget: %v", err)
} }
c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)) c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta))
c.PrimaryPodDisruptionBudget = nil c.PodDisruptionBudget = nil
err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
func() (bool, error) { func() (bool, error) {
@ -557,70 +487,15 @@ func (c *Cluster) deletePrimaryPodDisruptionBudget() error {
return false, err2 return false, err2
}) })
if err != nil { if err != nil {
return fmt.Errorf("could not delete primary pod disruption budget: %v", err) return fmt.Errorf("could not delete pod disruption budget: %v", err)
} }
return nil return nil
} }
func (c *Cluster) deleteCriticalOpPodDisruptionBudget() error {
c.logger.Debug("deleting pod disruption budget for critical operations")
if c.CriticalOpPodDisruptionBudget == nil {
c.logger.Debug("there is no pod disruption budget for critical operations in the cluster")
return nil
}
pdbName := util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta)
err := c.KubeClient.
PodDisruptionBudgets(c.CriticalOpPodDisruptionBudget.Namespace).
Delete(context.TODO(), c.CriticalOpPodDisruptionBudget.Name, c.deleteOptions)
if k8sutil.ResourceNotFound(err) {
c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta))
} else if err != nil {
return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
}
c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta))
c.CriticalOpPodDisruptionBudget = nil
err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
func() (bool, error) {
_, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{})
if err2 == nil {
return false, nil
}
if k8sutil.ResourceNotFound(err2) {
return true, nil
}
return false, err2
})
if err != nil {
return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
}
return nil
}
func (c *Cluster) deletePodDisruptionBudgets() error {
errors := make([]string, 0)
if err := c.deletePrimaryPodDisruptionBudget(); err != nil {
errors = append(errors, fmt.Sprintf("%v", err))
}
if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil {
errors = append(errors, fmt.Sprintf("%v", err))
}
if len(errors) > 0 {
return fmt.Errorf("%v", strings.Join(errors, `', '`))
}
return nil
}
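
Both budget deletions follow the same wait-until-gone pattern: issue the delete, then poll with the retry utility until the API returns not-found, so a subsequent create cannot race a still-terminating object. A condensed, commented copy of that wait loop (mirroring the retry blocks above):

// Condensed wait-until-deleted loop, as used for both budgets above.
err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
	func() (bool, error) {
		_, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{})
		if err2 == nil {
			return false, nil // still there, keep polling
		}
		if k8sutil.ResourceNotFound(err2) {
			return true, nil // gone for good
		}
		return false, err2 // unexpected error, abort
	})
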
func (c *Cluster) deleteEndpoint(role PostgresRole) error { func (c *Cluster) deleteEndpoint(role PostgresRole) error {
c.setProcessName("deleting endpoint") c.setProcessName("deleting endpoint")
c.logger.Debugf("deleting %s endpoint", role) c.logger.Debugln("deleting endpoint")
if c.Endpoints[role] == nil { if c.Endpoints[role] == nil {
c.logger.Debugf("there is no %s endpoint in the cluster", role) c.logger.Debugf("there is no %s endpoint in the cluster", role)
return nil return nil
@ -668,7 +543,7 @@ func (c *Cluster) deletePatroniResources() error {
func (c *Cluster) deletePatroniConfigMap(suffix string) error { func (c *Cluster) deletePatroniConfigMap(suffix string) error {
c.setProcessName("deleting Patroni config map") c.setProcessName("deleting Patroni config map")
c.logger.Debugf("deleting %s Patroni config map", suffix) c.logger.Debugln("deleting Patroni config map")
cm := c.PatroniConfigMaps[suffix] cm := c.PatroniConfigMaps[suffix]
if cm == nil { if cm == nil {
c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix) c.logger.Debugf("there is no %s Patroni config map in the cluster", suffix)
@ -690,7 +565,7 @@ func (c *Cluster) deletePatroniConfigMap(suffix string) error {
func (c *Cluster) deletePatroniEndpoint(suffix string) error { func (c *Cluster) deletePatroniEndpoint(suffix string) error {
c.setProcessName("deleting Patroni endpoint") c.setProcessName("deleting Patroni endpoint")
c.logger.Debugf("deleting %s Patroni endpoint", suffix) c.logger.Debugln("deleting Patroni endpoint")
ep := c.PatroniEndpoints[suffix] ep := c.PatroniEndpoints[suffix]
if ep == nil { if ep == nil {
c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix) c.logger.Debugf("there is no %s Patroni endpoint in the cluster", suffix)
@ -834,12 +709,7 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet {
return c.Statefulset return c.Statefulset
} }
// GetPrimaryPodDisruptionBudget returns cluster's primary kubernetes PodDisruptionBudget // GetPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget
func (c *Cluster) GetPrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget { func (c *Cluster) GetPodDisruptionBudget() *policyv1.PodDisruptionBudget {
return c.PrimaryPodDisruptionBudget return c.PodDisruptionBudget
}
// GetCriticalOpPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget for critical operations
func (c *Cluster) GetCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget {
return c.CriticalOpPodDisruptionBudget
} }

View File

@ -46,13 +46,11 @@ func (c *Cluster) updateStreams(newEventStreams *zalandov1.FabricEventStream) (p
func (c *Cluster) deleteStream(appId string) error { func (c *Cluster) deleteStream(appId string) error {
c.setProcessName("deleting event stream") c.setProcessName("deleting event stream")
c.logger.Debugf("deleting event stream with applicationId %s", appId)
err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{}) err := c.KubeClient.FabricEventStreams(c.Streams[appId].Namespace).Delete(context.TODO(), c.Streams[appId].Name, metav1.DeleteOptions{})
if err != nil { if err != nil {
return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err) return fmt.Errorf("could not delete event stream %q with applicationId %s: %v", c.Streams[appId].Name, appId, err)
} }
c.logger.Infof("event stream %q with applicationId %s has been successfully deleted", c.Streams[appId].Name, appId)
delete(c.Streams, appId) delete(c.Streams, appId)
return nil return nil
@ -114,10 +112,10 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
} }
for slotName, slotAndPublication := range databaseSlotsList { for slotName, slotAndPublication := range databaseSlotsList {
newTables := slotAndPublication.Publication tables := slotAndPublication.Publication
tableNames := make([]string, len(newTables)) tableNames := make([]string, len(tables))
i := 0 i := 0
for t := range newTables { for t := range tables {
tableName, schemaName := getTableSchema(t) tableName, schemaName := getTableSchema(t)
tableNames[i] = fmt.Sprintf("%s.%s", schemaName, tableName) tableNames[i] = fmt.Sprintf("%s.%s", schemaName, tableName)
i++ i++
@ -126,12 +124,6 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
tableList := strings.Join(tableNames, ", ") tableList := strings.Join(tableNames, ", ")
currentTables, exists := currentPublications[slotName] currentTables, exists := currentPublications[slotName]
// if newTables is empty it means that its definition was removed from the streams section
// but when the slot is defined in the manifest we should still sync publications;
// by reusing the current tables we make sure the publication is not dropped
if len(newTables) == 0 {
tableList = currentTables
}
if !exists { if !exists {
createPublications[slotName] = tableList createPublications[slotName] = tableList
} else if currentTables != tableList { } else if currentTables != tableList {
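
For context on the comparison above: the publication definition is flattened into a comma-separated schema.table list and compared as a plain string against what currentPublications returned from the database. A hedged illustration, assuming getTableSchema defaults unqualified names to the "public" schema; the sort is this sketch's way of making the string comparison deterministic:

// Hedged illustration of the table-list flattening above.
tables := map[string]acidv1.StreamTable{"data.foo": {}, "bar": {}}
tableNames := make([]string, 0, len(tables))
for t := range tables {
	schema, name := "public", t
	if i := strings.Index(t, "."); i >= 0 {
		schema, name = t[:i], t[i+1:]
	}
	tableNames = append(tableNames, fmt.Sprintf("%s.%s", schema, name))
}
sort.Strings(tableNames)
tableList := strings.Join(tableNames, ", ") // "data.foo, public.bar"
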
@ -184,25 +176,16 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEventStream { func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEventStream {
eventStreams := make([]zalandov1.EventStream, 0) eventStreams := make([]zalandov1.EventStream, 0)
resourceAnnotations := map[string]string{}
var err, err2 error
for _, stream := range c.Spec.Streams { for _, stream := range c.Spec.Streams {
if stream.ApplicationId != appId { if stream.ApplicationId != appId {
continue continue
} }
err = setResourceAnnotation(&resourceAnnotations, stream.CPU, constants.EventStreamCpuAnnotationKey)
err2 = setResourceAnnotation(&resourceAnnotations, stream.Memory, constants.EventStreamMemoryAnnotationKey)
if err != nil || err2 != nil {
c.logger.Warningf("could not set resource annotation for event stream: %v", err)
}
for tableName, table := range stream.Tables { for tableName, table := range stream.Tables {
streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn) streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn)
streamFlow := getEventStreamFlow(table.PayloadColumn) streamFlow := getEventStreamFlow(stream, table.PayloadColumn)
streamSink := getEventStreamSink(stream, table.EventType) streamSink := getEventStreamSink(stream, table.EventType)
streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType, table.IgnoreRecovery) streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType)
eventStreams = append(eventStreams, zalandov1.EventStream{ eventStreams = append(eventStreams, zalandov1.EventStream{
EventStreamFlow: streamFlow, EventStreamFlow: streamFlow,
@ -222,7 +205,7 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))), Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))),
Namespace: c.Namespace, Namespace: c.Namespace,
Labels: c.labelsSet(true), Labels: c.labelsSet(true),
Annotations: c.AnnotationsToPropagate(c.annotationsSet(resourceAnnotations)), Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)),
OwnerReferences: c.ownerReferences(), OwnerReferences: c.ownerReferences(),
}, },
Spec: zalandov1.FabricEventStreamSpec{ Spec: zalandov1.FabricEventStreamSpec{
@ -232,27 +215,6 @@ func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEvent
} }
} }
func setResourceAnnotation(annotations *map[string]string, resource *string, key string) error {
var (
isSmaller bool
err error
)
if resource != nil {
currentValue, exists := (*annotations)[key]
if exists {
isSmaller, err = util.IsSmallerQuantity(currentValue, *resource)
if err != nil {
return fmt.Errorf("could not compare resource in %q annotation: %v", key, err)
}
}
if isSmaller || !exists {
(*annotations)[key] = *resource
}
}
return nil
}
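
Note the keep-the-maximum behavior of setResourceAnnotation above: an existing annotation is only overwritten when the incoming value is larger, so with several streams per applicationId the annotation ends up carrying the highest CPU or memory any of them requested. A hypothetical walk-through:

// Hypothetical usage of setResourceAnnotation's keep-the-maximum behavior.
annotations := map[string]string{}
small, big := "250m", "500m"
_ = setResourceAnnotation(&annotations, &small, constants.EventStreamCpuAnnotationKey) // sets "250m"
_ = setResourceAnnotation(&annotations, &big, constants.EventStreamCpuAnnotationKey)   // raises to "500m"
_ = setResourceAnnotation(&annotations, &small, constants.EventStreamCpuAnnotationKey) // smaller, no change
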
func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource { func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource {
table, schema := getTableSchema(tableName) table, schema := getTableSchema(tableName)
streamFilter := stream.Filter[tableName] streamFilter := stream.Filter[tableName]
@ -268,7 +230,7 @@ func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, i
} }
} }
func getEventStreamFlow(payloadColumn *string) zalandov1.EventStreamFlow { func getEventStreamFlow(stream acidv1.Stream, payloadColumn *string) zalandov1.EventStreamFlow {
return zalandov1.EventStreamFlow{ return zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType, Type: constants.EventStreamFlowPgGenericType,
PayloadColumn: payloadColumn, PayloadColumn: payloadColumn,
@ -283,7 +245,7 @@ func getEventStreamSink(stream acidv1.Stream, eventType string) zalandov1.EventS
} }
} }
func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string, ignoreRecovery *bool) zalandov1.EventStreamRecovery { func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string) zalandov1.EventStreamRecovery {
if (stream.EnableRecovery != nil && !*stream.EnableRecovery) || if (stream.EnableRecovery != nil && !*stream.EnableRecovery) ||
(stream.EnableRecovery == nil && recoveryEventType == "") { (stream.EnableRecovery == nil && recoveryEventType == "") {
return zalandov1.EventStreamRecovery{ return zalandov1.EventStreamRecovery{
@ -291,12 +253,6 @@ func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType s
} }
} }
if ignoreRecovery != nil && *ignoreRecovery {
return zalandov1.EventStreamRecovery{
Type: constants.EventStreamRecoveryIgnoreType,
}
}
if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" { if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" {
recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix) recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix)
} }
@ -352,12 +308,20 @@ func (c *Cluster) syncStreams() error {
_, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{}) _, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), constants.EventStreamCRDName, metav1.GetOptions{})
if k8sutil.ResourceNotFound(err) { if k8sutil.ResourceNotFound(err) {
c.logger.Debug("event stream CRD not installed, skipping") c.logger.Debugf("event stream CRD not installed, skipping")
return nil return nil
} }
// create map with every database and empty slot definition databaseSlots := make(map[string]map[string]zalandov1.Slot)
// we need it to detect removal of streams from databases slotsToSync := make(map[string]map[string]string)
requiredPatroniConfig := c.Spec.Patroni
if len(requiredPatroniConfig.Slots) > 0 {
for slotName, slotConfig := range requiredPatroniConfig.Slots {
slotsToSync[slotName] = slotConfig
}
}
if err := c.initDbConn(); err != nil { if err := c.initDbConn(); err != nil {
return fmt.Errorf("could not init database connection") return fmt.Errorf("could not init database connection")
} }
@ -370,28 +334,13 @@ func (c *Cluster) syncStreams() error {
if err != nil { if err != nil {
return fmt.Errorf("could not get list of databases: %v", err) return fmt.Errorf("could not get list of databases: %v", err)
} }
databaseSlots := make(map[string]map[string]zalandov1.Slot) // get database name with empty list of slot, except template0 and template1
for dbName := range listDatabases { for dbName := range listDatabases {
if dbName != "template0" && dbName != "template1" { if dbName != "template0" && dbName != "template1" {
databaseSlots[dbName] = map[string]zalandov1.Slot{} databaseSlots[dbName] = map[string]zalandov1.Slot{}
} }
} }
// need to take explicitly defined slots into account when syncing Patroni config
slotsToSync := make(map[string]map[string]string)
requiredPatroniConfig := c.Spec.Patroni
if len(requiredPatroniConfig.Slots) > 0 {
for slotName, slotConfig := range requiredPatroniConfig.Slots {
slotsToSync[slotName] = slotConfig
if _, exists := databaseSlots[slotConfig["database"]]; exists {
databaseSlots[slotConfig["database"]][slotName] = zalandov1.Slot{
Slot: slotConfig,
Publication: make(map[string]acidv1.StreamTable),
}
}
}
}
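Note: the two loops above first seed one empty slot map per real database and then fold the manifest-defined Patroni slots into it, so that streams later removed from a database can still be detected. A compact, runnable sketch of that grouping (the Slot shape is simplified from zalandov1.Slot; names are illustrative):

package main

import "fmt"

// Slot is simplified from zalandov1.Slot: Patroni slot settings plus the
// publication tables grouped under the slot (table name -> event type here).
type Slot struct {
	Slot        map[string]string
	Publication map[string]string
}

func main() {
	listDatabases := []string{"foo", "template0", "template1"}

	// one empty slot map per real database, except the templates
	databaseSlots := make(map[string]map[string]Slot)
	for _, dbName := range listDatabases {
		if dbName != "template0" && dbName != "template1" {
			databaseSlots[dbName] = map[string]Slot{}
		}
	}

	// slots defined explicitly under spec.patroni.slots are carried over too
	manifestSlots := map[string]map[string]string{
		"my_slot": {"database": "foo", "plugin": "pgoutput", "type": "logical"},
	}
	for slotName, slotConfig := range manifestSlots {
		if _, exists := databaseSlots[slotConfig["database"]]; exists {
			databaseSlots[slotConfig["database"]][slotName] = Slot{
				Slot:        slotConfig,
				Publication: map[string]string{},
			}
		}
	}

	fmt.Printf("%+v\n", databaseSlots)
}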
// get list of required slots and publications, group by database // get list of required slots and publications, group by database
for _, stream := range c.Spec.Streams { for _, stream := range c.Spec.Streams {
if _, exists := databaseSlots[stream.Database]; !exists { if _, exists := databaseSlots[stream.Database]; !exists {
@ -404,13 +353,13 @@ func (c *Cluster) syncStreams() error {
"type": "logical", "type": "logical",
} }
slotName := getSlotName(stream.Database, stream.ApplicationId) slotName := getSlotName(stream.Database, stream.ApplicationId)
slotAndPublication, exists := databaseSlots[stream.Database][slotName] if _, exists := databaseSlots[stream.Database][slotName]; !exists {
if !exists {
databaseSlots[stream.Database][slotName] = zalandov1.Slot{ databaseSlots[stream.Database][slotName] = zalandov1.Slot{
Slot: slot, Slot: slot,
Publication: stream.Tables, Publication: stream.Tables,
} }
} else { } else {
slotAndPublication := databaseSlots[stream.Database][slotName]
streamTables := slotAndPublication.Publication streamTables := slotAndPublication.Publication
for tableName, table := range stream.Tables { for tableName, table := range stream.Tables {
if _, exists := streamTables[tableName]; !exists { if _, exists := streamTables[tableName]; !exists {
@ -491,9 +440,7 @@ func (c *Cluster) syncStream(appId string) error {
c.setProcessName("syncing stream with applicationId %s", appId) c.setProcessName("syncing stream with applicationId %s", appId)
c.logger.Debugf("syncing stream with applicationId %s", appId) c.logger.Debugf("syncing stream with applicationId %s", appId)
listOptions := metav1.ListOptions{ listOptions := metav1.ListOptions{LabelSelector: c.labelsSet(true).String()}
LabelSelector: c.labelsSet(false).String(),
}
streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions)
if err != nil { if err != nil {
return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err) return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err)
@ -504,23 +451,30 @@ func (c *Cluster) syncStream(appId string) error {
if stream.Spec.ApplicationId != appId { if stream.Spec.ApplicationId != appId {
continue continue
} }
if streamExists {
c.logger.Warningf("more than one event stream with applicationId %s found, delete it", appId)
if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil {
c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err)
} else {
c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId)
}
continue
}
streamExists = true streamExists = true
c.Streams[appId] = &stream
desiredStreams := c.generateFabricEventStream(appId) desiredStreams := c.generateFabricEventStream(appId)
if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) { if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId) c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId)
stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences
c.setProcessName("updating event streams with applicationId %s", appId) c.setProcessName("updating event streams with applicationId %s", appId)
updatedStream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{}) stream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err) return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err)
} }
c.Streams[appId] = updatedStream c.Streams[appId] = stream
} }
if match, reason := c.compareStreams(&stream, desiredStreams); !match { if match, reason := c.compareStreams(&stream, desiredStreams); !match {
c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason) c.logger.Debugf("updating event streams with applicationId %s: %s", appId, reason)
// make sure to keep the old name with randomly generated suffix desiredStreams.ObjectMeta = stream.ObjectMeta
desiredStreams.ObjectMeta.Name = stream.ObjectMeta.Name
updatedStream, err := c.updateStreams(desiredStreams) updatedStream, err := c.updateStreams(desiredStreams)
if err != nil { if err != nil {
return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err) return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err)
@ -528,7 +482,6 @@ func (c *Cluster) syncStream(appId string) error {
c.Streams[appId] = updatedStream c.Streams[appId] = updatedStream
c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId) c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId)
} }
break
} }
if !streamExists { if !streamExists {
@ -546,29 +499,15 @@ func (c *Cluster) syncStream(appId string) error {
func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason string) { func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason string) {
reasons := make([]string, 0) reasons := make([]string, 0)
desiredAnnotations := make(map[string]string)
match = true match = true
// stream operator can add extra annotations so incl. current annotations in desired annotations // stream operator can add extra annotations so incl. current annotations in desired annotations
for curKey, curValue := range curEventStreams.Annotations { desiredAnnotations := c.annotationsSet(curEventStreams.Annotations)
if _, exists := desiredAnnotations[curKey]; !exists { if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed {
desiredAnnotations[curKey] = curValue
}
}
// add/or override annotations if cpu and memory values were changed
for newKey, newValue := range newEventStreams.Annotations {
desiredAnnotations[newKey] = newValue
}
if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations, nil); changed {
match = false match = false
reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason)) reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason))
} }
if !reflect.DeepEqual(curEventStreams.ObjectMeta.Labels, newEventStreams.ObjectMeta.Labels) {
match = false
reasons = append(reasons, "new streams labels do not match the current ones")
}
if changed, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !changed { if changed, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !changed {
match = false match = false
reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match : %s", reason)) reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match : %s", reason))
@ -611,6 +550,7 @@ func (c *Cluster) cleanupRemovedStreams(appIds []string) error {
if err != nil { if err != nil {
errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err)) errors = append(errors, fmt.Sprintf("failed deleting event streams with applicationId %s: %v", appId, err))
} }
c.logger.Infof("event streams with applicationId %s have been successfully deleted", appId)
} }
} }
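Note: cleanup iterates over distinct applicationIds because several manifest streams can share one id and end up grouped into a single FabricEventStream. A sketch of the deduplication getDistinctApplicationIds presumably performs (illustrative, order-preserving):

package main

import "fmt"

// distinctApplicationIds keeps the first occurrence of each applicationId
// so that cleanup acts once per FabricEventStream resource.
func distinctApplicationIds(ids []string) []string {
	seen := make(map[string]bool)
	distinct := make([]string, 0, len(ids))
	for _, id := range ids {
		if !seen[id] {
			seen[id] = true
			distinct = append(distinct, id)
		}
	}
	return distinct
}

func main() {
	fmt.Println(distinctApplicationIds([]string{"app1", "app1", "app2"})) // [app1 app2]
}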

View File

@ -56,26 +56,21 @@ var (
ApplicationId: appId, ApplicationId: appId,
Database: "foo", Database: "foo",
Tables: map[string]acidv1.StreamTable{ Tables: map[string]acidv1.StreamTable{
"data.bar": { "data.bar": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
IdColumn: k8sutil.StringToPointer("b_id"), IdColumn: k8sutil.StringToPointer("b_id"),
PayloadColumn: k8sutil.StringToPointer("b_payload"), PayloadColumn: k8sutil.StringToPointer("b_payload"),
}, },
"data.foobar": { "data.foobar": acidv1.StreamTable{
EventType: "stream-type-b", EventType: "stream-type-b",
RecoveryEventType: "stream-type-b-dlq", RecoveryEventType: "stream-type-b-dlq",
}, },
"data.foofoobar": {
EventType: "stream-type-c",
IgnoreRecovery: util.True(),
},
}, },
EnableRecovery: util.True(), EnableRecovery: util.True(),
Filter: map[string]*string{ Filter: map[string]*string{
"data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"), "data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"),
}, },
BatchSize: k8sutil.UInt32ToPointer(uint32(100)), BatchSize: k8sutil.UInt32ToPointer(uint32(100)),
CPU: k8sutil.StringToPointer("250m"),
}, },
}, },
TeamID: "acid", TeamID: "acid",
@ -93,16 +88,13 @@ var (
ObjectMeta: metav1.ObjectMeta{ ObjectMeta: metav1.ObjectMeta{
Name: fmt.Sprintf("%s-12345", clusterName), Name: fmt.Sprintf("%s-12345", clusterName),
Namespace: namespace, Namespace: namespace,
Annotations: map[string]string{
constants.EventStreamCpuAnnotationKey: "250m",
},
Labels: map[string]string{ Labels: map[string]string{
"application": "spilo", "application": "spilo",
"cluster-name": clusterName, "cluster-name": fmt.Sprintf("%s-2", clusterName),
"team": "acid", "team": "acid",
}, },
OwnerReferences: []metav1.OwnerReference{ OwnerReferences: []metav1.OwnerReference{
{ metav1.OwnerReference{
APIVersion: "apps/v1", APIVersion: "apps/v1",
Kind: "StatefulSet", Kind: "StatefulSet",
Name: "acid-test-cluster", Name: "acid-test-cluster",
@ -113,7 +105,7 @@ var (
Spec: zalandov1.FabricEventStreamSpec{ Spec: zalandov1.FabricEventStreamSpec{
ApplicationId: appId, ApplicationId: appId,
EventStreams: []zalandov1.EventStream{ EventStreams: []zalandov1.EventStream{
{ zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{ EventStreamFlow: zalandov1.EventStreamFlow{
PayloadColumn: k8sutil.StringToPointer("b_payload"), PayloadColumn: k8sutil.StringToPointer("b_payload"),
Type: constants.EventStreamFlowPgGenericType, Type: constants.EventStreamFlowPgGenericType,
@ -152,7 +144,7 @@ var (
Type: constants.EventStreamSourcePGType, Type: constants.EventStreamSourcePGType,
}, },
}, },
{ zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{ EventStreamFlow: zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType, Type: constants.EventStreamFlowPgGenericType,
}, },
@ -188,37 +180,6 @@ var (
Type: constants.EventStreamSourcePGType, Type: constants.EventStreamSourcePGType,
}, },
}, },
{
EventStreamFlow: zalandov1.EventStreamFlow{
Type: constants.EventStreamFlowPgGenericType,
},
EventStreamRecovery: zalandov1.EventStreamRecovery{
Type: constants.EventStreamRecoveryIgnoreType,
},
EventStreamSink: zalandov1.EventStreamSink{
EventType: "stream-type-c",
MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)),
Type: constants.EventStreamSinkNakadiType,
},
EventStreamSource: zalandov1.EventStreamSource{
Connection: zalandov1.Connection{
DBAuth: zalandov1.DBAuth{
Name: fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName),
PasswordKey: "password",
Type: constants.EventStreamSourceAuthType,
UserKey: "username",
},
Url: fmt.Sprintf("jdbc:postgresql://%s.%s/foo?user=%s&ssl=true&sslmode=require", clusterName, namespace, fesUser),
SlotName: slotName,
PluginType: constants.EventStreamSourcePluginType,
},
Schema: "data",
EventStreamTable: zalandov1.EventStreamTable{
Name: "foofoobar",
},
Type: constants.EventStreamSourcePGType,
},
},
}, },
}, },
} }
@ -280,7 +241,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test1": { "test1": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
}, },
}, },
@ -288,7 +249,7 @@ func TestHasSlotsInSync(t *testing.T) {
}, },
}, },
actualSlots: map[string]map[string]string{ actualSlots: map[string]map[string]string{
slotName: { slotName: map[string]string{
"databases": dbName, "databases": dbName,
"plugin": constants.EventStreamSourcePluginType, "plugin": constants.EventStreamSourcePluginType,
"type": "logical", "type": "logical",
@ -307,7 +268,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test1": { "test1": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
}, },
}, },
@ -328,7 +289,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test1": { "test1": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
}, },
}, },
@ -351,7 +312,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test1": { "test1": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
}, },
}, },
@ -365,7 +326,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test2": { "test2": acidv1.StreamTable{
EventType: "stream-type-b", EventType: "stream-type-b",
}, },
}, },
@ -373,7 +334,7 @@ func TestHasSlotsInSync(t *testing.T) {
}, },
}, },
actualSlots: map[string]map[string]string{ actualSlots: map[string]map[string]string{
slotName: { slotName: map[string]string{
"databases": dbName, "databases": dbName,
"plugin": constants.EventStreamSourcePluginType, "plugin": constants.EventStreamSourcePluginType,
"type": "logical", "type": "logical",
@ -392,7 +353,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test1": { "test1": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
}, },
}, },
@ -406,7 +367,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test2": { "test2": acidv1.StreamTable{
EventType: "stream-type-b", EventType: "stream-type-b",
}, },
}, },
@ -414,7 +375,7 @@ func TestHasSlotsInSync(t *testing.T) {
}, },
}, },
actualSlots: map[string]map[string]string{ actualSlots: map[string]map[string]string{
slotName: { slotName: map[string]string{
"databases": dbName, "databases": dbName,
"plugin": constants.EventStreamSourcePluginType, "plugin": constants.EventStreamSourcePluginType,
"type": "logical", "type": "logical",
@ -433,7 +394,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test1": { "test1": acidv1.StreamTable{
EventType: "stream-type-a", EventType: "stream-type-a",
}, },
}, },
@ -447,7 +408,7 @@ func TestHasSlotsInSync(t *testing.T) {
"type": "logical", "type": "logical",
}, },
Publication: map[string]acidv1.StreamTable{ Publication: map[string]acidv1.StreamTable{
"test2": { "test2": acidv1.StreamTable{
EventType: "stream-type-b", EventType: "stream-type-b",
}, },
}, },
@ -455,7 +416,7 @@ func TestHasSlotsInSync(t *testing.T) {
}, },
}, },
actualSlots: map[string]map[string]string{ actualSlots: map[string]map[string]string{
slotName: { slotName: map[string]string{
"databases": dbName, "databases": dbName,
"plugin": constants.EventStreamSourcePluginType, "plugin": constants.EventStreamSourcePluginType,
"type": "logical", "type": "logical",
@ -488,7 +449,7 @@ func TestGenerateFabricEventStream(t *testing.T) {
} }
listOptions := metav1.ListOptions{ listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(false).String(), LabelSelector: cluster.labelsSet(true).String(),
} }
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err) assert.NoError(t, err)
@ -527,8 +488,7 @@ func newFabricEventStream(streams []zalandov1.EventStream, annotations map[strin
} }
func TestSyncStreams(t *testing.T) { func TestSyncStreams(t *testing.T) {
newClusterName := fmt.Sprintf("%s-2", pg.Name) pg.Name = fmt.Sprintf("%s-2", pg.Name)
pg.Name = newClusterName
var cluster = New( var cluster = New(
Config{ Config{
OpConfig: config.Config{ OpConfig: config.Config{
@ -540,6 +500,7 @@ func TestSyncStreams(t *testing.T) {
DefaultCPULimit: "300m", DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi", DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi", DefaultMemoryLimit: "300Mi",
EnableOwnerReferences: util.True(),
PodRoleLabel: "spilo-role", PodRoleLabel: "spilo-role",
}, },
}, },
@ -553,23 +514,39 @@ func TestSyncStreams(t *testing.T) {
err = cluster.syncStream(appId) err = cluster.syncStream(appId)
assert.NoError(t, err) assert.NoError(t, err)
// sync the stream again // create a second stream with same spec but with different name
createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
context.TODO(), fes, metav1.CreateOptions{})
assert.NoError(t, err)
assert.Equal(t, createdStream.Spec.ApplicationId, appId)
// check that two streams exist
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(true).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items))
// sync the stream which should remove the redundant stream
err = cluster.syncStream(appId) err = cluster.syncStream(appId)
assert.NoError(t, err) assert.NoError(t, err)
// check that only one stream remains after sync // check that only one stream remains after sync
listOptions := metav1.ListOptions{ streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
LabelSelector: cluster.labelsSet(false).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items)) assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
// check owner references
if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
}
} }
func TestSameStreams(t *testing.T) { func TestSameStreams(t *testing.T) {
testName := "TestSameStreams" testName := "TestSameStreams"
annotationsA := map[string]string{constants.EventStreamMemoryAnnotationKey: "500Mi"} annotationsA := map[string]string{"owned-by": "acid"}
annotationsB := map[string]string{constants.EventStreamMemoryAnnotationKey: "1Gi"} annotationsB := map[string]string{"owned-by": "foo"}
stream1 := zalandov1.EventStream{ stream1 := zalandov1.EventStream{
EventStreamFlow: zalandov1.EventStreamFlow{}, EventStreamFlow: zalandov1.EventStreamFlow{},
@ -638,49 +615,42 @@ func TestSameStreams(t *testing.T) {
streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
match: false, match: false,
reason: "new streams EventStreams array does not match : number of defined streams is different", reason: "number of defined streams is different",
}, },
{ {
subTest: "different number of streams", subTest: "different number of streams",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
match: false, match: false,
reason: "new streams EventStreams array does not match : number of defined streams is different", reason: "number of defined streams is different",
}, },
{ {
subTest: "event stream specs differ", subTest: "event stream specs differ",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil),
streamsB: fes, streamsB: fes,
match: false, match: false,
reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_CPU\" with value \"250m\"., new streams labels do not match the current ones, new streams EventStreams array does not match : number of defined streams is different", reason: "number of defined streams is different",
}, },
{ {
subTest: "event stream recovery specs differ", subTest: "event stream recovery specs differ",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil), streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil), streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil),
match: false, match: false,
reason: "new streams EventStreams array does not match : event stream specs differ", reason: "event stream specs differ",
},
{
subTest: "event stream with new annotations",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA),
match: false,
reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_MEMORY\" with value \"500Mi\".",
}, },
{ {
subTest: "event stream annotations differ", subTest: "event stream annotations differ",
streamsA: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsA), streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA),
streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB), streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB),
match: false, match: false,
reason: "new streams annotations do not match: \"fes.zalando.org/FES_MEMORY\" changed from \"500Mi\" to \"1Gi\".", reason: "event stream specs differ",
}, },
} }
for _, tt := range tests { for _, tt := range tests {
streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB) streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB)
if streamsMatch != tt.match || matchReason != tt.reason { if streamsMatch != tt.match {
t.Errorf("%s %s: unexpected match result when comparing streams: got %s, expected %s", t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s",
testName, tt.subTest, matchReason, tt.reason) testName, tt.subTest, matchReason, tt.reason)
} }
} }
@ -688,105 +658,6 @@ func TestSameStreams(t *testing.T) {
func TestUpdateStreams(t *testing.T) { func TestUpdateStreams(t *testing.T) {
pg.Name = fmt.Sprintf("%s-3", pg.Name) pg.Name = fmt.Sprintf("%s-3", pg.Name)
var cluster = New(
Config{
OpConfig: config.Config{
PodManagementPolicy: "ordered_ready",
Resources: config.Resources{
ClusterLabels: map[string]string{"application": "spilo"},
ClusterNameLabel: "cluster-name",
DefaultCPURequest: "300m",
DefaultCPULimit: "300m",
DefaultMemoryRequest: "300Mi",
DefaultMemoryLimit: "300Mi",
EnableOwnerReferences: util.True(),
PodRoleLabel: "spilo-role",
},
},
}, client, pg, logger, eventRecorder)
_, err := cluster.KubeClient.Postgresqls(namespace).Create(
context.TODO(), &pg, metav1.CreateOptions{})
assert.NoError(t, err)
// create stream with different owner reference
fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name)
fes.ObjectMeta.Labels["cluster-name"] = pg.Name
createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create(
context.TODO(), fes, metav1.CreateOptions{})
assert.NoError(t, err)
assert.Equal(t, createdStream.Spec.ApplicationId, appId)
// sync the stream which should update the owner reference
err = cluster.syncStream(appId)
assert.NoError(t, err)
// check that only one stream exists after sync
listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(true).String(),
}
streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items))
// compare owner references
if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) {
t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences)
}
// change specs of streams and patch CRD
for i, stream := range pg.Spec.Streams {
if stream.ApplicationId == appId {
streamTable := stream.Tables["data.bar"]
streamTable.EventType = "stream-type-c"
stream.Tables["data.bar"] = streamTable
stream.BatchSize = k8sutil.UInt32ToPointer(uint32(250))
pg.Spec.Streams[i] = stream
}
}
// compare stream returned from API with expected stream
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result := cluster.generateFabricEventStream(appId)
if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
}
// disable recovery
for idx, stream := range pg.Spec.Streams {
if stream.ApplicationId == appId {
stream.EnableRecovery = util.False()
pg.Spec.Streams[idx] = stream
}
}
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result = cluster.generateFabricEventStream(appId)
if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
}
}
func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
patchData, err := specPatch(pgSpec)
assert.NoError(t, err)
pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
assert.NoError(t, err)
cluster.Postgresql.Spec = pgPatched.Spec
err = cluster.syncStream(appId)
assert.NoError(t, err)
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err)
return streams
}
func TestDeleteStreams(t *testing.T) {
pg.Name = fmt.Sprintf("%s-4", pg.Name)
var cluster = New( var cluster = New(
Config{ Config{
OpConfig: config.Config{ OpConfig: config.Config{
@ -824,7 +695,7 @@ func TestDeleteStreams(t *testing.T) {
// compare stream returned from API with expected stream // compare stream returned from API with expected stream
listOptions := metav1.ListOptions{ listOptions := metav1.ListOptions{
LabelSelector: cluster.labelsSet(false).String(), LabelSelector: cluster.labelsSet(true).String(),
} }
streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result := cluster.generateFabricEventStream(appId) result := cluster.generateFabricEventStream(appId)
@ -832,14 +703,6 @@ func TestDeleteStreams(t *testing.T) {
t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result)
} }
// change teamId and check that stream is updated
pg.Spec.TeamID = "new-team"
streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions)
result = cluster.generateFabricEventStream(appId)
if match, _ := cluster.compareStreams(&streams.Items[0], result); !match {
t.Errorf("Malformed FabricEventStream after updating teamId, expected %#v, got %#v", streams.Items[0].ObjectMeta.Labels, result.ObjectMeta.Labels)
}
// disable recovery // disable recovery
for idx, stream := range pg.Spec.Streams { for idx, stream := range pg.Spec.Streams {
if stream.ApplicationId == appId { if stream.ApplicationId == appId {
@ -854,6 +717,9 @@ func TestDeleteStreams(t *testing.T) {
t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result)
} }
mockClient := k8sutil.NewMockKubernetesClient()
cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
// remove streams from manifest // remove streams from manifest
pg.Spec.Streams = nil pg.Spec.Streams = nil
pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update( pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update(
@ -863,29 +729,26 @@ func TestDeleteStreams(t *testing.T) {
appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams) appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams)
cluster.cleanupRemovedStreams(appIds) cluster.cleanupRemovedStreams(appIds)
// check that streams have been deleted
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err) if len(streams.Items) > 0 || err != nil {
assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items)) t.Errorf("stream resource has not been removed or unexpected error %v", err)
}
}
// create stream to test deleteStreams code func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) {
fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name) patchData, err := specPatch(pgSpec)
fes.ObjectMeta.Labels["cluster-name"] = pg.Name
_, err = cluster.KubeClient.FabricEventStreams(namespace).Create(
context.TODO(), fes, metav1.CreateOptions{})
assert.NoError(t, err) assert.NoError(t, err)
// sync once so the stream is stored in the cluster struct pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch(
context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec")
assert.NoError(t, err)
cluster.Postgresql.Spec = pgPatched.Spec
err = cluster.syncStream(appId) err = cluster.syncStream(appId)
assert.NoError(t, err) assert.NoError(t, err)
// we need a mock client because deleteStreams checks for CRD existence
mockClient := k8sutil.NewMockKubernetesClient()
cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter
cluster.deleteStreams()
// check that streams have been deleted
streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions)
assert.NoError(t, err) assert.NoError(t, err)
assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items))
return streams
} }

View File

@ -4,10 +4,8 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"maps"
"reflect" "reflect"
"regexp" "regexp"
"slices"
"strconv" "strconv"
"strings" "strings"
"time" "time"
@ -17,6 +15,8 @@ import (
"github.com/zalando/postgres-operator/pkg/util" "github.com/zalando/postgres-operator/pkg/util"
"github.com/zalando/postgres-operator/pkg/util/constants" "github.com/zalando/postgres-operator/pkg/util/constants"
"github.com/zalando/postgres-operator/pkg/util/k8sutil" "github.com/zalando/postgres-operator/pkg/util/k8sutil"
"golang.org/x/exp/maps"
"golang.org/x/exp/slices"
batchv1 "k8s.io/api/batch/v1" batchv1 "k8s.io/api/batch/v1"
v1 "k8s.io/api/core/v1" v1 "k8s.io/api/core/v1"
policyv1 "k8s.io/api/policy/v1" policyv1 "k8s.io/api/policy/v1"
@ -97,11 +97,6 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
} }
} }
if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
// do not apply any major version related changes yet
newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
}
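Note: outside the maintenance window the sync keeps the old PgVersion, so a major version change is deferred until a sync that falls inside a window. A minimal sketch of such a time-of-day window check, assuming windows like "01:00-06:00" (the operator's real parsing and weekday handling live elsewhere):

package main

import (
	"fmt"
	"time"
)

// inWindow reports whether now's wall-clock time falls inside [from, to],
// with both bounds given as "15:04" strings (illustrative; weekday windows
// are ignored here).
func inWindow(now time.Time, from, to string) bool {
	f, err1 := time.Parse("15:04", from)
	t, err2 := time.Parse("15:04", to)
	if err1 != nil || err2 != nil {
		return false
	}
	cur, _ := time.Parse("15:04", now.Format("15:04"))
	return !cur.Before(f) && !cur.After(t)
}

func main() {
	night := time.Date(2024, 1, 1, 3, 30, 0, 0, time.UTC)
	noon := time.Date(2024, 1, 1, 12, 0, 0, 0, time.UTC)
	fmt.Println(inWindow(night, "01:00", "06:00")) // true
	fmt.Println(inWindow(noon, "01:00", "06:00"))  // false
}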
if err = c.syncStatefulSet(); err != nil { if err = c.syncStatefulSet(); err != nil {
if !k8sutil.ResourceAlreadyExists(err) { if !k8sutil.ResourceAlreadyExists(err) {
err = fmt.Errorf("could not sync statefulsets: %v", err) err = fmt.Errorf("could not sync statefulsets: %v", err)
@ -117,8 +112,8 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
} }
c.logger.Debug("syncing pod disruption budgets") c.logger.Debug("syncing pod disruption budgets")
if err = c.syncPodDisruptionBudgets(false); err != nil { if err = c.syncPodDisruptionBudget(false); err != nil {
err = fmt.Errorf("could not sync pod disruption budgets: %v", err) err = fmt.Errorf("could not sync pod disruption budget: %v", err)
return err return err
} }
@ -153,10 +148,7 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
return fmt.Errorf("could not sync connection pooler: %v", err) return fmt.Errorf("could not sync connection pooler: %v", err)
} }
// sync if manifest stream count is different from stream CR count if len(c.Spec.Streams) > 0 {
// the counts may differ permanently because manifest streams are grouped by applicationId
// but the check still catches removals that were missed on update
if len(c.Spec.Streams) != len(c.Streams) {
c.logger.Debug("syncing streams") c.logger.Debug("syncing streams")
if err = c.syncStreams(); err != nil { if err = c.syncStreams(); err != nil {
err = fmt.Errorf("could not sync streams: %v", err) err = fmt.Errorf("could not sync streams: %v", err)
@ -238,7 +230,7 @@ func (c *Cluster) syncPatroniConfigMap(suffix string) error {
maps.Copy(annotations, cm.Annotations) maps.Copy(annotations, cm.Annotations)
// Patroni can add extra annotations so incl. current annotations in desired annotations // Patroni can add extra annotations so incl. current annotations in desired annotations
desiredAnnotations := c.annotationsSet(cm.Annotations) desiredAnnotations := c.annotationsSet(cm.Annotations)
if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
patchData, err := metaAnnotationsPatch(desiredAnnotations) patchData, err := metaAnnotationsPatch(desiredAnnotations)
if err != nil { if err != nil {
return fmt.Errorf("could not form patch for %s config map: %v", configMapName, err) return fmt.Errorf("could not form patch for %s config map: %v", configMapName, err)
@ -283,7 +275,7 @@ func (c *Cluster) syncPatroniEndpoint(suffix string) error {
maps.Copy(annotations, ep.Annotations) maps.Copy(annotations, ep.Annotations)
// Patroni can add extra annotations so incl. current annotations in desired annotations // Patroni can add extra annotations so incl. current annotations in desired annotations
desiredAnnotations := c.annotationsSet(ep.Annotations) desiredAnnotations := c.annotationsSet(ep.Annotations)
if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
patchData, err := metaAnnotationsPatch(desiredAnnotations) patchData, err := metaAnnotationsPatch(desiredAnnotations)
if err != nil { if err != nil {
return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err) return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err)
@ -308,7 +300,6 @@ func (c *Cluster) syncPatroniService() error {
err error err error
) )
serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni) serviceName := fmt.Sprintf("%s-%s", c.Name, Patroni)
c.logger.Debugf("syncing %s service", serviceName)
c.setProcessName("syncing %s service", serviceName) c.setProcessName("syncing %s service", serviceName)
if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil { if svc, err = c.KubeClient.Services(c.Namespace).Get(context.TODO(), serviceName, metav1.GetOptions{}); err == nil {
@ -320,7 +311,7 @@ func (c *Cluster) syncPatroniService() error {
c.setProcessName("updating %v service", serviceName) c.setProcessName("updating %v service", serviceName)
svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{}) svc, err = c.KubeClient.Services(c.Namespace).Update(context.TODO(), svc, metav1.UpdateOptions{})
if err != nil { if err != nil {
return fmt.Errorf("could not update %s service: %v", serviceName, err) return fmt.Errorf("could not update %s endpoint: %v", serviceName, err)
} }
c.Services[Patroni] = svc c.Services[Patroni] = svc
} }
@ -328,7 +319,7 @@ func (c *Cluster) syncPatroniService() error {
maps.Copy(annotations, svc.Annotations) maps.Copy(annotations, svc.Annotations)
// Patroni can add extra annotations so incl. current annotations in desired annotations // Patroni can add extra annotations so incl. current annotations in desired annotations
desiredAnnotations := c.annotationsSet(svc.Annotations) desiredAnnotations := c.annotationsSet(svc.Annotations)
if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed {
patchData, err := metaAnnotationsPatch(desiredAnnotations) patchData, err := metaAnnotationsPatch(desiredAnnotations)
if err != nil { if err != nil {
return fmt.Errorf("could not form patch for %s service: %v", serviceName, err) return fmt.Errorf("could not form patch for %s service: %v", serviceName, err)
@ -420,7 +411,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
return fmt.Errorf("could not update %s endpoint: %v", role, err) return fmt.Errorf("could not update %s endpoint: %v", role, err)
} }
} else { } else {
if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations, nil); changed { if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations); changed {
patchData, err := metaAnnotationsPatch(desiredEp.Annotations) patchData, err := metaAnnotationsPatch(desiredEp.Annotations)
if err != nil { if err != nil {
return fmt.Errorf("could not form patch for %s endpoint: %v", role, err) return fmt.Errorf("could not form patch for %s endpoint: %v", role, err)
@ -455,22 +446,22 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error {
return nil return nil
} }
func (c *Cluster) syncPrimaryPodDisruptionBudget(isUpdate bool) error { func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error {
var ( var (
pdb *policyv1.PodDisruptionBudget pdb *policyv1.PodDisruptionBudget
err error err error
) )
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil { if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
c.PrimaryPodDisruptionBudget = pdb c.PodDisruptionBudget = pdb
newPDB := c.generatePrimaryPodDisruptionBudget() newPDB := c.generatePodDisruptionBudget()
match, reason := c.comparePodDisruptionBudget(pdb, newPDB) match, reason := c.comparePodDisruptionBudget(pdb, newPDB)
if !match { if !match {
c.logPDBChanges(pdb, newPDB, isUpdate, reason) c.logPDBChanges(pdb, newPDB, isUpdate, reason)
if err = c.updatePrimaryPodDisruptionBudget(newPDB); err != nil { if err = c.updatePodDisruptionBudget(newPDB); err != nil {
return err return err
} }
} else { } else {
c.PrimaryPodDisruptionBudget = pdb c.PodDisruptionBudget = pdb
} }
return nil return nil
@ -479,74 +470,21 @@ func (c *Cluster) syncPrimaryPodDisruptionBudget(isUpdate bool) error {
return fmt.Errorf("could not get pod disruption budget: %v", err) return fmt.Errorf("could not get pod disruption budget: %v", err)
} }
// no existing pod disruption budget, create new one // no existing pod disruption budget, create new one
c.logger.Infof("could not find the primary pod disruption budget") c.logger.Infof("could not find the cluster's pod disruption budget")
if err = c.createPrimaryPodDisruptionBudget(); err != nil { if pdb, err = c.createPodDisruptionBudget(); err != nil {
if !k8sutil.ResourceAlreadyExists(err) { if !k8sutil.ResourceAlreadyExists(err) {
return fmt.Errorf("could not create primary pod disruption budget: %v", err) return fmt.Errorf("could not create pod disruption budget: %v", err)
} }
c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil { if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta))
} }
} }
return nil c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta))
} c.PodDisruptionBudget = pdb
func (c *Cluster) syncCriticalOpPodDisruptionBudget(isUpdate bool) error {
var (
pdb *policyv1.PodDisruptionBudget
err error
)
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil {
c.CriticalOpPodDisruptionBudget = pdb
newPDB := c.generateCriticalOpPodDisruptionBudget()
match, reason := c.comparePodDisruptionBudget(pdb, newPDB)
if !match {
c.logPDBChanges(pdb, newPDB, isUpdate, reason)
if err = c.updateCriticalOpPodDisruptionBudget(newPDB); err != nil {
return err
}
} else {
c.CriticalOpPodDisruptionBudget = pdb
}
return nil
}
if !k8sutil.ResourceNotFound(err) {
return fmt.Errorf("could not get pod disruption budget: %v", err)
}
// no existing pod disruption budget, create new one
c.logger.Infof("could not find pod disruption budget for critical operations")
if err = c.createCriticalOpPodDisruptionBudget(); err != nil {
if !k8sutil.ResourceAlreadyExists(err) {
return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err)
}
c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta))
if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil {
return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta))
}
}
return nil
}
func (c *Cluster) syncPodDisruptionBudgets(isUpdate bool) error {
errors := make([]string, 0)
if err := c.syncPrimaryPodDisruptionBudget(isUpdate); err != nil {
errors = append(errors, fmt.Sprintf("%v", err))
}
if err := c.syncCriticalOpPodDisruptionBudget(isUpdate); err != nil {
errors = append(errors, fmt.Sprintf("%v", err))
}
if len(errors) > 0 {
return fmt.Errorf("%v", strings.Join(errors, `', '`))
}
return nil return nil
} }
@ -558,7 +496,6 @@ func (c *Cluster) syncStatefulSet() error {
) )
podsToRecreate := make([]v1.Pod, 0) podsToRecreate := make([]v1.Pod, 0)
isSafeToRecreatePods := true isSafeToRecreatePods := true
postponeReasons := make([]string, 0)
switchoverCandidates := make([]spec.NamespacedName, 0) switchoverCandidates := make([]spec.NamespacedName, 0)
pods, err := c.listPods() pods, err := c.listPods()
@ -600,7 +537,7 @@ func (c *Cluster) syncStatefulSet() error {
if err != nil { if err != nil {
return fmt.Errorf("could not generate statefulset: %v", err) return fmt.Errorf("could not generate statefulset: %v", err)
} }
c.logger.Debug("syncing statefulsets") c.logger.Debugf("syncing statefulsets")
// check if there are still pods with a rolling update flag // check if there are still pods with a rolling update flag
for _, pod := range pods { for _, pod := range pods {
if c.getRollingUpdateFlagFromPod(&pod) { if c.getRollingUpdateFlagFromPod(&pod) {
@ -615,7 +552,7 @@ func (c *Cluster) syncStatefulSet() error {
} }
if len(podsToRecreate) > 0 { if len(podsToRecreate) > 0 {
c.logger.Infof("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods)) c.logger.Debugf("%d / %d pod(s) still need to be rotated", len(podsToRecreate), len(pods))
} }
// statefulset is already there, make sure we use its definition in order to compare with the spec. // statefulset is already there, make sure we use its definition in order to compare with the spec.
@ -623,22 +560,13 @@ func (c *Cluster) syncStatefulSet() error {
cmp := c.compareStatefulSetWith(desiredSts) cmp := c.compareStatefulSetWith(desiredSts)
if !cmp.rollingUpdate { if !cmp.rollingUpdate {
updatedPodAnnotations := map[string]*string{}
for _, anno := range cmp.deletedPodAnnotations {
updatedPodAnnotations[anno] = nil
}
for anno, val := range desiredSts.Spec.Template.Annotations {
updatedPodAnnotations[anno] = &val
}
metadataReq := map[string]map[string]map[string]*string{"metadata": {"annotations": updatedPodAnnotations}}
patch, err := json.Marshal(metadataReq)
if err != nil {
return fmt.Errorf("could not form patch for pod annotations: %v", err)
}
for _, pod := range pods { for _, pod := range pods {
if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations, nil); changed { if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations); changed {
_, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) patchData, err := metaAnnotationsPatch(desiredSts.Spec.Template.Annotations)
if err != nil {
return fmt.Errorf("could not form patch for pod %q annotations: %v", pod.Name, err)
}
_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
if err != nil { if err != nil {
return fmt.Errorf("could not patch annotations for pod %q: %v", pod.Name, err) return fmt.Errorf("could not patch annotations for pod %q: %v", pod.Name, err)
} }
@ -717,14 +645,12 @@ func (c *Cluster) syncStatefulSet() error {
c.logger.Debug("syncing Patroni config") c.logger.Debug("syncing Patroni config")
if configPatched, restartPrimaryFirst, restartWait, err = c.syncPatroniConfig(pods, c.Spec.Patroni, requiredPgParameters); err != nil { if configPatched, restartPrimaryFirst, restartWait, err = c.syncPatroniConfig(pods, c.Spec.Patroni, requiredPgParameters); err != nil {
c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err) c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err)
postponeReasons = append(postponeReasons, "errors during Patroni config sync")
isSafeToRecreatePods = false isSafeToRecreatePods = false
} }
// restart Postgres where it is still pending // restart Postgres where it is still pending
if err = c.restartInstances(pods, restartWait, restartPrimaryFirst); err != nil { if err = c.restartInstances(pods, restartWait, restartPrimaryFirst); err != nil {
c.logger.Errorf("errors while restarting Postgres in pods via Patroni API: %v", err) c.logger.Errorf("errors while restarting Postgres in pods via Patroni API: %v", err)
postponeReasons = append(postponeReasons, "errors while restarting Postgres via Patroni API")
isSafeToRecreatePods = false isSafeToRecreatePods = false
} }
@ -732,14 +658,14 @@ func (c *Cluster) syncStatefulSet() error {
// statefulset or those that got their configuration from the outdated statefulset) // statefulset or those that got their configuration from the outdated statefulset)
if len(podsToRecreate) > 0 { if len(podsToRecreate) > 0 {
if isSafeToRecreatePods { if isSafeToRecreatePods {
c.logger.Info("performing rolling update") c.logger.Debugln("performing rolling update")
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Performing rolling update")
if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil { if err := c.recreatePods(podsToRecreate, switchoverCandidates); err != nil {
return fmt.Errorf("could not recreate pods: %v", err) return fmt.Errorf("could not recreate pods: %v", err)
} }
c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated")
} else { } else {
c.logger.Warningf("postpone pod recreation until next sync - reason: %s", strings.Join(postponeReasons, `', '`)) c.logger.Warningf("postpone pod recreation until next sync because of errors during config sync")
} }
} }
@ -1045,7 +971,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
// carries the request to change configuration through // carries the request to change configuration through
for _, pod := range pods { for _, pod := range pods {
podName := util.NameFromMeta(pod.ObjectMeta) podName := util.NameFromMeta(pod.ObjectMeta)
c.logger.Infof("patching Postgres config via Patroni API on pod %s with following options: %s", c.logger.Debugf("patching Postgres config via Patroni API on pod %s with following options: %s",
podName, standbyOptionsToSet) podName, standbyOptionsToSet)
if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil { if err = c.patroni.SetStandbyClusterParameters(&pod, standbyOptionsToSet); err == nil {
return nil return nil
@ -1057,7 +983,7 @@ func (c *Cluster) syncStandbyClusterConfiguration() error {
} }
func (c *Cluster) syncSecrets() error { func (c *Cluster) syncSecrets() error {
c.logger.Debug("syncing secrets") c.logger.Info("syncing secrets")
c.setProcessName("syncing secrets") c.setProcessName("syncing secrets")
generatedSecrets := c.generateUserSecrets() generatedSecrets := c.generateUserSecrets()
retentionUsers := make([]string, 0) retentionUsers := make([]string, 0)
@ -1067,7 +993,7 @@ func (c *Cluster) syncSecrets() error {
secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{}) secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{})
if err == nil { if err == nil {
c.Secrets[secret.UID] = secret c.Secrets[secret.UID] = secret
c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID) c.logger.Debugf("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID)
continue continue
} }
if k8sutil.ResourceAlreadyExists(err) { if k8sutil.ResourceAlreadyExists(err) {
@ -1122,14 +1048,13 @@ func (c *Cluster) updateSecret(
// fetch user map to update later // fetch user map to update later
var userMap map[string]spec.PgUser var userMap map[string]spec.PgUser
var userKey string var userKey string
switch secretUsername { if secretUsername == c.systemUsers[constants.SuperuserKeyName].Name {
case c.systemUsers[constants.SuperuserKeyName].Name:
userKey = constants.SuperuserKeyName userKey = constants.SuperuserKeyName
userMap = c.systemUsers userMap = c.systemUsers
case c.systemUsers[constants.ReplicationUserKeyName].Name: } else if secretUsername == c.systemUsers[constants.ReplicationUserKeyName].Name {
userKey = constants.ReplicationUserKeyName userKey = constants.ReplicationUserKeyName
userMap = c.systemUsers userMap = c.systemUsers
default: } else {
userKey = secretUsername userKey = secretUsername
userMap = c.pgUsers userMap = c.pgUsers
} }
@ -1209,14 +1134,14 @@ func (c *Cluster) updateSecret(
} }
if updateSecret { if updateSecret {
c.logger.Infof("%s", updateSecretMsg) c.logger.Debugln(updateSecretMsg)
if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { if secret, err = c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil {
return fmt.Errorf("could not update secret %s: %v", secretName, err) return fmt.Errorf("could not update secret %s: %v", secretName, err)
} }
c.Secrets[secret.UID] = secret c.Secrets[secret.UID] = secret
} }
if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations, nil); changed { if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations); changed {
patchData, err := metaAnnotationsPatch(generatedSecret.Annotations) patchData, err := metaAnnotationsPatch(generatedSecret.Annotations)
if err != nil { if err != nil {
return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err) return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err)
@ -1661,38 +1586,19 @@ func (c *Cluster) syncLogicalBackupJob() error {
} }
c.logger.Infof("logical backup job %s updated", c.getLogicalBackupJobName()) c.logger.Infof("logical backup job %s updated", c.getLogicalBackupJobName())
} }
if cmp := c.compareLogicalBackupJob(job, desiredJob); !cmp.match { if match, reason := c.compareLogicalBackupJob(job, desiredJob); !match {
c.logger.Infof("logical job %s is not in the desired state and needs to be updated", c.logger.Infof("logical job %s is not in the desired state and needs to be updated",
c.getLogicalBackupJobName(), c.getLogicalBackupJobName(),
) )
if len(cmp.reasons) != 0 { if reason != "" {
for _, reason := range cmp.reasons {
c.logger.Infof("reason: %s", reason) c.logger.Infof("reason: %s", reason)
} }
}
if len(cmp.deletedPodAnnotations) != 0 {
templateMetadataReq := map[string]map[string]map[string]map[string]map[string]map[string]map[string]*string{
"spec": {"jobTemplate": {"spec": {"template": {"metadata": {"annotations": {}}}}}}}
for _, anno := range cmp.deletedPodAnnotations {
templateMetadataReq["spec"]["jobTemplate"]["spec"]["template"]["metadata"]["annotations"][anno] = nil
}
patch, err := json.Marshal(templateMetadataReq)
if err != nil {
return fmt.Errorf("could not marshal ObjectMeta for logical backup job %q pod template: %v", jobName, err)
}
job, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), jobName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "")
if err != nil {
c.logger.Errorf("failed to remove annotations from the logical backup job %q pod template: %v", jobName, err)
return err
}
}
if err = c.patchLogicalBackupJob(desiredJob); err != nil { if err = c.patchLogicalBackupJob(desiredJob); err != nil {
return fmt.Errorf("could not update logical backup job to match desired state: %v", err) return fmt.Errorf("could not update logical backup job to match desired state: %v", err)
} }
c.logger.Info("the logical backup job is synced") c.logger.Info("the logical backup job is synced")
} }
if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations, nil); changed { if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations); changed {
patchData, err := metaAnnotationsPatch(desiredJob.Annotations) patchData, err := metaAnnotationsPatch(desiredJob.Annotations)
if err != nil { if err != nil {
return fmt.Errorf("could not form patch for the logical backup job %q: %v", jobName, err) return fmt.Errorf("could not form patch for the logical backup job %q: %v", jobName, err)

View File

@@ -2,14 +2,15 @@ package cluster
 import (
 "bytes"
-"context"
 "fmt"
 "io"
 "net/http"
-"slices"
 "testing"
 "time"
+"context"
+"golang.org/x/exp/slices"
 v1 "k8s.io/api/core/v1"
 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 "k8s.io/apimachinery/pkg/types"
@@ -141,181 +142,6 @@ func TestSyncStatefulSetsAnnotations(t *testing.T) {
 }
 }
-func TestPodAnnotationsSync(t *testing.T) {
-clusterName := "acid-test-cluster-2"
-namespace := "default"
-podAnnotation := "no-scale-down"
-podAnnotations := map[string]string{podAnnotation: "true"}
-customPodAnnotation := "foo"
-customPodAnnotations := map[string]string{customPodAnnotation: "true"}
-ctrl := gomock.NewController(t)
-defer ctrl.Finish()
-mockClient := mocks.NewMockHTTPClient(ctrl)
-client, _ := newFakeK8sAnnotationsClient()
-pg := acidv1.Postgresql{
-ObjectMeta: metav1.ObjectMeta{
-Name: clusterName,
-Namespace: namespace,
-},
-Spec: acidv1.PostgresSpec{
-Volume: acidv1.Volume{
-Size: "1Gi",
-},
-EnableConnectionPooler: boolToPointer(true),
-EnableLogicalBackup: true,
-EnableReplicaConnectionPooler: boolToPointer(true),
-PodAnnotations: podAnnotations,
-NumberOfInstances: 2,
-},
-}
-var cluster = New(
-Config{
-OpConfig: config.Config{
-PatroniAPICheckInterval: time.Duration(1),
-PatroniAPICheckTimeout: time.Duration(5),
-PodManagementPolicy: "ordered_ready",
-CustomPodAnnotations: customPodAnnotations,
-ConnectionPooler: config.ConnectionPooler{
-ConnectionPoolerDefaultCPURequest: "100m",
-ConnectionPoolerDefaultCPULimit: "100m",
-ConnectionPoolerDefaultMemoryRequest: "100Mi",
-ConnectionPoolerDefaultMemoryLimit: "100Mi",
-NumberOfInstances: k8sutil.Int32ToPointer(1),
-},
-Resources: config.Resources{
-ClusterLabels: map[string]string{"application": "spilo"},
-ClusterNameLabel: "cluster-name",
-DefaultCPURequest: "300m",
-DefaultCPULimit: "300m",
-DefaultMemoryRequest: "300Mi",
-DefaultMemoryLimit: "300Mi",
-MaxInstances: -1,
-PodRoleLabel: "spilo-role",
-ResourceCheckInterval: time.Duration(3),
-ResourceCheckTimeout: time.Duration(10),
-},
-},
-}, client, pg, logger, eventRecorder)
-configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}}, "ttl": 20}`
-response := http.Response{
-StatusCode: 200,
-Body: io.NopCloser(bytes.NewReader([]byte(configJson))),
-}
-mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes()
-cluster.patroni = patroni.New(patroniLogger, mockClient)
-cluster.Name = clusterName
-cluster.Namespace = namespace
-clusterOptions := clusterLabelsOptions(cluster)
-// create a statefulset
-_, err := cluster.createStatefulSet()
-assert.NoError(t, err)
-// create a pods
-podsList := createPods(cluster)
-for _, pod := range podsList {
-_, err = cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
-assert.NoError(t, err)
-}
-// create connection pooler
-_, err = cluster.createConnectionPooler(mockInstallLookupFunction)
-assert.NoError(t, err)
-// create cron job
-err = cluster.createLogicalBackupJob()
-assert.NoError(t, err)
-annotateResources(cluster)
-err = cluster.Sync(&cluster.Postgresql)
-assert.NoError(t, err)
-// 1. PodAnnotations set
-stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
-assert.NoError(t, err)
-for _, sts := range stsList.Items {
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.Contains(t, sts.Spec.Template.Annotations, annotation)
-}
-}
-for _, role := range []PostgresRole{Master, Replica} {
-deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
-assert.NoError(t, err)
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.Contains(t, deploy.Spec.Template.Annotations, annotation,
-fmt.Sprintf("pooler deployment pod template %s should contain annotation %s, found %#v",
-deploy.Name, annotation, deploy.Spec.Template.Annotations))
-}
-}
-podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions)
-assert.NoError(t, err)
-for _, pod := range podList.Items {
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.Contains(t, pod.Annotations, annotation,
-fmt.Sprintf("pod %s should contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations))
-}
-}
-cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
-assert.NoError(t, err)
-for _, cronJob := range cronJobList.Items {
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.Contains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation,
-fmt.Sprintf("logical backup cron job's pod template should contain annotation %s, found %#v",
-annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations))
-}
-}
-// 2 PodAnnotations removed
-newSpec := cluster.Postgresql.DeepCopy()
-newSpec.Spec.PodAnnotations = nil
-cluster.OpConfig.CustomPodAnnotations = nil
-err = cluster.Sync(newSpec)
-assert.NoError(t, err)
-stsList, err = cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
-assert.NoError(t, err)
-for _, sts := range stsList.Items {
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.NotContains(t, sts.Spec.Template.Annotations, annotation)
-}
-}
-for _, role := range []PostgresRole{Master, Replica} {
-deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
-assert.NoError(t, err)
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.NotContains(t, deploy.Spec.Template.Annotations, annotation,
-fmt.Sprintf("pooler deployment pod template %s should not contain annotation %s, found %#v",
-deploy.Name, annotation, deploy.Spec.Template.Annotations))
-}
-}
-podList, err = cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions)
-assert.NoError(t, err)
-for _, pod := range podList.Items {
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.NotContains(t, pod.Annotations, annotation,
-fmt.Sprintf("pod %s should not contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations))
-}
-}
-cronJobList, err = cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
-assert.NoError(t, err)
-for _, cronJob := range cronJobList.Items {
-for _, annotation := range []string{podAnnotation, customPodAnnotation} {
-assert.NotContains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation,
-fmt.Sprintf("logical backup cron job's pod template should not contain annotation %s, found %#v",
-annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations))
-}
-}
-}
 func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
 testName := "test config comparison"
 client, _ := newFakeK8sSyncClient()
@@ -818,7 +644,7 @@ func TestUpdateSecret(t *testing.T) {
 ApplicationId: appId,
 Database: dbname,
 Tables: map[string]acidv1.StreamTable{
-"data.foo": {
+"data.foo": acidv1.StreamTable{
 EventType: "stream-type-b",
 },
 },


@@ -66,8 +66,7 @@ type ClusterStatus struct {
 MasterEndpoint *v1.Endpoints
 ReplicaEndpoint *v1.Endpoints
 StatefulSet *appsv1.StatefulSet
-PrimaryPodDisruptionBudget *policyv1.PodDisruptionBudget
-CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget
+PodDisruptionBudget *policyv1.PodDisruptionBudget
 CurrentProcess Process
 Worker uint32


@@ -193,7 +193,7 @@ func logNiceDiff(log *logrus.Entry, old, new interface{}) {
 nice := nicediff.Diff(string(o), string(n), true)
 for _, s := range strings.Split(nice, "\n") {
 // " is not needed in the value to understand
-log.Debug(strings.ReplaceAll(s, "\"", ""))
+log.Debugf(strings.ReplaceAll(s, "\"", ""))
 }
 }
@@ -209,7 +209,7 @@ func (c *Cluster) logStatefulSetChanges(old, new *appsv1.StatefulSet, isUpdate b
 logNiceDiff(c.logger, old.Spec, new.Spec)
 if !reflect.DeepEqual(old.Annotations, new.Annotations) {
-c.logger.Debug("metadata.annotation are different")
+c.logger.Debugf("metadata.annotation are different")
 logNiceDiff(c.logger, old.Annotations, new.Annotations)
 }
@@ -257,9 +257,9 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 if teamID == "" {
 msg := "no teamId specified"
 if c.OpConfig.EnableTeamIdClusternamePrefix {
-return nil, fmt.Errorf("%s", msg)
+return nil, fmt.Errorf(msg)
 }
-c.logger.Warnf("%s", msg)
+c.logger.Warnf(msg)
 return nil, nil
 }
@@ -280,7 +280,7 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 }
 if !c.OpConfig.EnableTeamsAPI {
-c.logger.Debug("team API is disabled")
+c.logger.Debugf("team API is disabled")
 return members, nil
 }
@@ -416,7 +416,7 @@ func (c *Cluster) _waitPodLabelsReady(anyReplica bool) error {
 podsNumber = len(pods.Items)
 c.logger.Debugf("Waiting for %d pods to become ready", podsNumber)
 } else {
-c.logger.Debug("Waiting for any replica pod to become ready")
+c.logger.Debugf("Waiting for any replica pod to become ready")
 }
 err := retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
@@ -663,7 +663,7 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac
 return resources, nil
 }
-func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
+func isInMainternanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
 if len(specMaintenanceWindows) == 0 {
 return true
 }


@@ -247,18 +247,18 @@ func createPods(cluster *Cluster) []v1.Pod {
 for i, role := range []PostgresRole{Master, Replica} {
 podsList = append(podsList, v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
-Name: fmt.Sprintf("%s-%d", cluster.Name, i),
+Name: fmt.Sprintf("%s-%d", clusterName, i),
 Namespace: namespace,
 Labels: map[string]string{
 "application": "spilo",
-"cluster-name": cluster.Name,
+"cluster-name": clusterName,
 "spilo-role": string(role),
 },
 },
 })
 podsList = append(podsList, v1.Pod{
 ObjectMeta: metav1.ObjectMeta{
-Name: fmt.Sprintf("%s-pooler-%s", cluster.Name, role),
+Name: fmt.Sprintf("%s-pooler-%s", clusterName, role),
 Namespace: namespace,
 Labels: cluster.connectionPoolerLabels(role, true).MatchLabels,
 },
@@ -329,7 +329,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 if err != nil {
 return nil, err
 }
-err = cluster.createPodDisruptionBudgets()
+_, err = cluster.createPodDisruptionBudget()
 if err != nil {
 return nil, err
 }
@@ -705,8 +705,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 for _, tt := range tests {
 t.Run(tt.name, func(t *testing.T) {
 cluster.Spec.MaintenanceWindows = tt.windows
-if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
-t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected)
+if isInMainternanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
+t.Errorf("Expected isInMainternanceWindow to return %t", tt.expected)
 }
 })
 }


@@ -13,9 +13,9 @@ import (
 "github.com/aws/aws-sdk-go/aws"
 "github.com/zalando/postgres-operator/pkg/spec"
+"github.com/zalando/postgres-operator/pkg/util"
 "github.com/zalando/postgres-operator/pkg/util/constants"
 "github.com/zalando/postgres-operator/pkg/util/filesystems"
-"github.com/zalando/postgres-operator/pkg/util/k8sutil"
 "github.com/zalando/postgres-operator/pkg/util/volumes"
 )
@@ -66,7 +66,7 @@ func (c *Cluster) syncVolumes() error {
 }
 func (c *Cluster) syncUnderlyingEBSVolume() error {
-c.logger.Debug("starting to sync EBS volumes: type, iops, throughput, and size")
+c.logger.Infof("starting to sync EBS volumes: type, iops, throughput, and size")
 var (
 err error
@@ -129,14 +129,14 @@
 if len(errors) > 0 {
 for _, s := range errors {
-c.logger.Warningf("%s", s)
+c.logger.Warningf(s)
 }
 }
 return nil
 }
 func (c *Cluster) populateVolumeMetaData() error {
-c.logger.Debug("starting reading ebs meta data")
+c.logger.Infof("starting reading ebs meta data")
 pvs, err := c.listPersistentVolumes()
 if err != nil {
@@ -151,7 +151,7 @@
 volumeIds := []string{}
 var volumeID string
 for _, pv := range pvs {
-volumeID, err = c.VolumeResizer.GetProviderVolumeID(pv)
+volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
 if err != nil {
 continue
 }
@@ -165,7 +165,7 @@
 }
 if len(currentVolumes) != len(c.EBSVolumes) && len(c.EBSVolumes) > 0 {
-c.logger.Infof("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
+c.logger.Debugf("number of ebs volumes (%d) discovered differs from already known volumes (%d)", len(currentVolumes), len(c.EBSVolumes))
 }
 // reset map, operator is not responsible for dangling ebs volumes
@@ -185,7 +185,7 @@ func (c *Cluster) syncVolumeClaims() error {
 if c.OpConfig.StorageResizeMode == "off" || c.OpConfig.StorageResizeMode == "ebs" {
 ignoreResize = true
-c.logger.Debugf("Storage resize mode is set to %q. Skipping volume size sync of persistent volume claims.", c.OpConfig.StorageResizeMode)
+c.logger.Debugf("Storage resize mode is set to %q. Skipping volume size sync of PVCs.", c.OpConfig.StorageResizeMode)
 }
 newSize, err := resource.ParseQuantity(c.Spec.Volume.Size)
@@ -196,49 +196,45 @@
 pvcs, err := c.listPersistentVolumeClaims()
 if err != nil {
-return fmt.Errorf("could not list persistent volume claims: %v", err)
+return fmt.Errorf("could not receive persistent volume claims: %v", err)
 }
 for _, pvc := range pvcs {
-c.VolumeClaims[pvc.UID] = &pvc
 needsUpdate := false
 currentSize := quantityToGigabyte(pvc.Spec.Resources.Requests[v1.ResourceStorage])
 if !ignoreResize && currentSize != manifestSize {
 if currentSize < manifestSize {
 pvc.Spec.Resources.Requests[v1.ResourceStorage] = newSize
 needsUpdate = true
-c.logger.Infof("persistent volume claim for volume %q needs to be resized", pvc.Name)
+c.logger.Debugf("persistent volume claim for volume %q needs to be resized", pvc.Name)
 } else {
 c.logger.Warningf("cannot shrink persistent volume")
 }
 }
 if needsUpdate {
-c.logger.Infof("updating persistent volume claim definition for volume %q", pvc.Name)
-updatedPvc, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{})
-if err != nil {
+c.logger.Debugf("updating persistent volume claim definition for volume %q", pvc.Name)
+if _, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Update(context.TODO(), &pvc, metav1.UpdateOptions{}); err != nil {
 return fmt.Errorf("could not update persistent volume claim: %q", err)
 }
-c.VolumeClaims[pvc.UID] = updatedPvc
-c.logger.Infof("successfully updated persistent volume claim %q", pvc.Name)
+c.logger.Debugf("successfully updated persistent volume claim %q", pvc.Name)
 } else {
 c.logger.Debugf("volume claim for volume %q do not require updates", pvc.Name)
 }
 newAnnotations := c.annotationsSet(nil)
-if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations, nil); changed {
+if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations); changed {
 patchData, err := metaAnnotationsPatch(newAnnotations)
 if err != nil {
 return fmt.Errorf("could not form patch for the persistent volume claim for volume %q: %v", pvc.Name, err)
 }
-patchedPvc, err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
+_, err = c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Patch(context.TODO(), pvc.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{})
 if err != nil {
 return fmt.Errorf("could not patch annotations of the persistent volume claim for volume %q: %v", pvc.Name, err)
 }
-c.VolumeClaims[pvc.UID] = patchedPvc
 }
 }
-c.logger.Debug("volume claims have been synced successfully")
+c.logger.Infof("volume claims have been synced successfully")
 return nil
 }
@@ -259,7 +255,7 @@ func (c *Cluster) syncEbsVolumes() error {
 return fmt.Errorf("could not sync volumes: %v", err)
 }
-c.logger.Debug("volumes have been synced successfully")
+c.logger.Infof("volumes have been synced successfully")
 return nil
 }
@@ -272,50 +268,38 @@ func (c *Cluster) listPersistentVolumeClaims() ([]v1.PersistentVolumeClaim, erro
 pvcs, err := c.KubeClient.PersistentVolumeClaims(ns).List(context.TODO(), listOptions)
 if err != nil {
-return nil, fmt.Errorf("could not list of persistent volume claims: %v", err)
+return nil, fmt.Errorf("could not list of PersistentVolumeClaims: %v", err)
 }
 return pvcs.Items, nil
 }
 func (c *Cluster) deletePersistentVolumeClaims() error {
-c.setProcessName("deleting persistent volume claims")
-errors := make([]string, 0)
-for uid := range c.VolumeClaims {
-err := c.deletePersistentVolumeClaim(uid)
+c.logger.Debugln("deleting PVCs")
+pvcs, err := c.listPersistentVolumeClaims()
 if err != nil {
-errors = append(errors, fmt.Sprintf("%v", err))
+return err
 }
+for _, pvc := range pvcs {
+c.logger.Debugf("deleting PVC %q", util.NameFromMeta(pvc.ObjectMeta))
+if err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions); err != nil {
+c.logger.Warningf("could not delete PersistentVolumeClaim: %v", err)
 }
-if len(errors) > 0 {
-c.logger.Warningf("could not delete all persistent volume claims: %v", strings.Join(errors, `', '`))
 }
+if len(pvcs) > 0 {
+c.logger.Debugln("PVCs have been deleted")
+} else {
+c.logger.Debugln("no PVCs to delete")
+}
 return nil
 }
-func (c *Cluster) deletePersistentVolumeClaim(uid types.UID) error {
-c.setProcessName("deleting persistent volume claim")
-pvc := c.VolumeClaims[uid]
-c.logger.Debugf("deleting persistent volume claim %q", pvc.Name)
-err := c.KubeClient.PersistentVolumeClaims(pvc.Namespace).Delete(context.TODO(), pvc.Name, c.deleteOptions)
-if k8sutil.ResourceNotFound(err) {
-c.logger.Debugf("persistent volume claim %q has already been deleted", pvc.Name)
-} else if err != nil {
-return fmt.Errorf("could not delete persistent volume claim %q: %v", pvc.Name, err)
-}
-c.logger.Infof("persistent volume claim %q has been deleted", pvc.Name)
-delete(c.VolumeClaims, uid)
-return nil
-}
 func (c *Cluster) listPersistentVolumes() ([]*v1.PersistentVolume, error) {
 result := make([]*v1.PersistentVolume, 0)
 pvcs, err := c.listPersistentVolumeClaims()
 if err != nil {
-return nil, fmt.Errorf("could not list cluster's persistent volume claims: %v", err)
+return nil, fmt.Errorf("could not list cluster's PersistentVolumeClaims: %v", err)
 }
 pods, err := c.listPods()
@@ -398,22 +382,22 @@ func (c *Cluster) resizeVolumes() error {
 if err != nil {
 return err
 }
-c.logger.Infof("updating persistent volume %q to %d", pv.Name, newSize)
+c.logger.Debugf("updating persistent volume %q to %d", pv.Name, newSize)
 if err := resizer.ResizeVolume(awsVolumeID, newSize); err != nil {
 return fmt.Errorf("could not resize EBS volume %q: %v", awsVolumeID, err)
 }
-c.logger.Infof("resizing the filesystem on the volume %q", pv.Name)
+c.logger.Debugf("resizing the filesystem on the volume %q", pv.Name)
 podName := getPodNameFromPersistentVolume(pv)
 if err := c.resizePostgresFilesystem(podName, []filesystems.FilesystemResizer{&filesystems.Ext234Resize{}}); err != nil {
 return fmt.Errorf("could not resize the filesystem on pod %q: %v", podName, err)
 }
-c.logger.Infof("filesystem resize successful on volume %q", pv.Name)
+c.logger.Debugf("filesystem resize successful on volume %q", pv.Name)
 pv.Spec.Capacity[v1.ResourceStorage] = newQuantity
-c.logger.Infof("updating persistent volume definition for volume %q", pv.Name)
+c.logger.Debugf("updating persistent volume definition for volume %q", pv.Name)
 if _, err := c.KubeClient.PersistentVolumes().Update(context.TODO(), pv, metav1.UpdateOptions{}); err != nil {
 return fmt.Errorf("could not update persistent volume: %q", err)
 }
-c.logger.Infof("successfully updated persistent volume %q", pv.Name)
+c.logger.Debugf("successfully updated persistent volume %q", pv.Name)
 if !compatible {
 c.logger.Warningf("volume %q is incompatible with all available resizing providers, consider switching storage_resize_mode to pvc or off", pv.Name)
@@ -474,7 +458,7 @@ func (c *Cluster) executeEBSMigration() error {
 }
 if !hasGp2 {
-c.logger.Debugf("no EBS gp2 volumes left to migrate")
+c.logger.Infof("no EBS gp2 volumes left to migrate")
 return nil
 }
 }


@@ -93,7 +93,7 @@ func TestResizeVolumeClaim(t *testing.T) {
 // check if listPersistentVolumeClaims returns only the PVCs matching the filter
 if len(pvcs) != len(pvcList.Items)-1 {
-t.Errorf("%s: could not find all persistent volume claims, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
+t.Errorf("%s: could not find all PVCs, got %v, expected %v", testName, len(pvcs), len(pvcList.Items)-1)
 }
 // check if PVCs were correctly resized
@@ -165,7 +165,7 @@ func CreatePVCs(namespace string, clusterName string, labels labels.Set, n int,
 Labels: labels,
 },
 Spec: v1.PersistentVolumeClaimSpec{
-Resources: v1.VolumeResourceRequirements{
+Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 v1.ResourceStorage: storage1Gi,
 },
@@ -216,12 +216,6 @@ func TestMigrateEBS(t *testing.T) {
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
-resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
-DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
-return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
-}).
-Times(2)
 resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
 []volumes.VolumeProperties{
 {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100},
@@ -262,7 +256,7 @@ func initTestVolumesAndPods(client k8sutil.KubernetesClient, namespace, clustern
 Labels: labels,
 },
 Spec: v1.PersistentVolumeClaimSpec{
-Resources: v1.VolumeResourceRequirements{
+Resources: v1.ResourceRequirements{
 Requests: v1.ResourceList{
 v1.ResourceStorage: storage1Gi,
 },
@@ -328,12 +322,6 @@ func TestMigrateGp3Support(t *testing.T) {
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil)
-resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
-DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
-return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
-}).
-Times(3)
 resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return(
 []volumes.VolumeProperties{
 {VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000},
@@ -389,12 +377,6 @@ func TestManualGp2Gp3Support(t *testing.T) {
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
-resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
-DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
-return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
-}).
-Times(2)
 resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
 []volumes.VolumeProperties{
 {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},
@@ -454,12 +436,6 @@ func TestDontTouchType(t *testing.T) {
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
 resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil)
-resizer.EXPECT().GetProviderVolumeID(gomock.Any()).
-DoAndReturn(func(pv *v1.PersistentVolume) (string, error) {
-return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID)
-}).
-Times(2)
 resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return(
 []volumes.VolumeProperties{
 {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000},


@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
 result.EtcdHost = fromCRD.EtcdHost
 result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p3")
+result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.3-p1")
 result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 result.MinInstances = fromCRD.MinInstances
 result.MaxInstances = fromCRD.MaxInstances
@@ -62,8 +62,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 // major version upgrade config
 result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual")
 result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList
-result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "13")
-result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "17")
+result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "12")
+result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "16")
 // kubernetes config
 result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False())
@@ -180,7 +180,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 // logical backup config
 result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.0")
+result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0")
 result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
 result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey


@@ -143,7 +143,7 @@ func (c *Controller) acquireInitialListOfClusters() error {
 if list, err = c.listClusters(metav1.ListOptions{ResourceVersion: "0"}); err != nil {
 return err
 }
-c.logger.Debug("acquiring initial list of clusters")
+c.logger.Debugf("acquiring initial list of clusters")
 for _, pg := range list.Items {
 // XXX: check the cluster status field instead
 if pg.Error != "" {
@@ -597,7 +597,7 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
 _, err := c.KubeClient.ServiceAccounts(namespace).Get(context.TODO(), podServiceAccountName, metav1.GetOptions{})
 if k8sutil.ResourceNotFound(err) {
-c.logger.Infof("creating pod service account %q in the %q namespace", podServiceAccountName, namespace)
+c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))
 // get a separate copy of service account
 // to prevent a race condition when setting a namespace for many clusters


@@ -76,8 +76,9 @@ func (c *Controller) createOperatorCRD(desiredCrd *apiextv1.CustomResourceDefini
 context.TODO(), crd.Name, types.MergePatchType, patch, metav1.PatchOptions{}); err != nil {
 return fmt.Errorf("could not update customResourceDefinition %q: %v", crd.Name, err)
 }
+} else {
+c.logger.Infof("customResourceDefinition %q has been registered", crd.Name)
 }
-c.logger.Infof("customResourceDefinition %q is registered", crd.Name)
 return wait.PollUntilContextTimeout(context.TODO(), c.config.CRDReadyWaitInterval, c.config.CRDReadyWaitTimeout, false, func(ctx context.Context) (bool, error) {
 c, err := c.KubeClient.CustomResourceDefinitions().Get(context.TODO(), desiredCrd.Name, metav1.GetOptions{})
@@ -248,7 +249,7 @@ func (c *Controller) getInfrastructureRoles(
 }
 if len(errors) > 0 {
-return uniqRoles, fmt.Errorf("%s", strings.Join(errors, `', '`))
+return uniqRoles, fmt.Errorf(strings.Join(errors, `', '`))
 }
 return uniqRoles, nil


@@ -132,7 +132,7 @@ func TestOldInfrastructureRoleFormat(t *testing.T) {
 for _, test := range testTable {
 roles, err := utilTestController.getInfrastructureRoles(
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: test.secretName,
 UserKey: "user",
 PasswordKey: "password",
@@ -163,7 +163,7 @@ func TestNewInfrastructureRoleFormat(t *testing.T) {
 // one secret with one configmap
 {
 []spec.NamespacedName{
-{
+spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
 },
@@ -187,11 +187,11 @@ func TestNewInfrastructureRoleFormat(t *testing.T) {
 // multiple standalone secrets
 {
 []spec.NamespacedName{
-{
+spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: "infrastructureroles-new-test1",
 },
-{
+spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: "infrastructureroles-new-test2",
 },
@@ -248,7 +248,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 // only new CRD format
 {
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -262,7 +262,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 spec.NamespacedName{},
 "",
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -280,7 +280,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 spec.NamespacedName{},
 "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role",
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -298,7 +298,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 spec.NamespacedName{},
 "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, defaultrolevalue: test-role",
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -319,7 +319,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 },
 "",
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesOldSecretName,
@@ -334,7 +334,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 // both formats for CRD
 {
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -351,7 +351,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 },
 "",
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -361,7 +361,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 RoleKey: "test-role",
 Template: false,
 },
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesOldSecretName,
@@ -382,7 +382,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 },
 "secretname: infrastructureroles-new-test, userkey: test-user, passwordkey: test-password, rolekey: test-role",
 []*config.InfrastructureRole{
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesNewSecretName,
@@ -392,7 +392,7 @@ func TestInfrastructureRoleDefinitions(t *testing.T) {
 RoleKey: "test-role",
 Template: false,
 },
-{
+&config.InfrastructureRole{
 SecretName: spec.NamespacedName{
 Namespace: v1.NamespaceDefault,
 Name: testInfrastructureRolesOldSecretName,

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

@@ -1,5 +1,5 @@
 /*
-Copyright 2025 Compose, Zalando SE
+Copyright 2024 Compose, Zalando SE
 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal

Some files were not shown because too many files have changed in this diff.