diff --git a/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md index ee3a704ea..3b731eb72 100644 --- a/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md +++ b/.github/ISSUE_TEMPLATE/postgres-operator-issue-template.md @@ -9,7 +9,7 @@ assignees: '' Please, answer some short questions which should help us to understand your problem / question better? -- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.13.0 +- **Which image of the operator are you using?** e.g. ghcr.io/zalando/postgres-operator:v1.15.1 - **Where do you run it - cloud or metal? Kubernetes or OpenShift?** [AWS K8s | GCP ... | Bare Metal K8s] - **Are you running Postgres Operator in production?** [yes | no] - **Type of issue?** [Bug report, question, feature request, etc.] diff --git a/.github/workflows/publish_ghcr_image.yaml b/.github/workflows/publish_ghcr_image.yaml index d56ff2f17..78815783a 100644 --- a/.github/workflows/publish_ghcr_image.yaml +++ b/.github/workflows/publish_ghcr_image.yaml @@ -23,7 +23,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "^1.23.4" + go-version: "^1.25.3" - name: Run unit tests run: make deps mocks test diff --git a/.github/workflows/run_e2e.yaml b/.github/workflows/run_e2e.yaml index 16573046e..86f861ec5 100644 --- a/.github/workflows/run_e2e.yaml +++ b/.github/workflows/run_e2e.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-go@v2 with: - go-version: "^1.23.4" + go-version: "^1.25.3" - name: Make dependencies run: make deps mocks - name: Code generation diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index db47f6e40..ec3e5eaf6 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: "^1.23.4" + go-version: "^1.25.3" - name: Make dependencies run: make deps mocks - name: Compile diff --git a/CODEOWNERS b/CODEOWNERS index ca6f43a72..96213da15 100644 --- a/CODEOWNERS +++ b/CODEOWNERS @@ -1,2 +1,2 @@ # global owners -* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @macedigital +* @sdudoladov @Jan-M @FxKu @jopadi @idanovinda @hughcapet @mikkeloscar diff --git a/LICENSE b/LICENSE index b21099078..2141e8bcb 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2024 Zalando SE +Copyright (c) 2025 Zalando SE Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/MAINTAINERS b/MAINTAINERS index cc07af957..6ff2aa62e 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4,4 +4,4 @@ Jan Mussler Jociele Padilha Ida Novindasari Polina Bungina -Matthias Adler +Mikkel Larsen diff --git a/Makefile b/Makefile index 8fc4b36f6..385a03d09 100644 --- a/Makefile +++ b/Makefile @@ -12,7 +12,8 @@ LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS) LDFLAGS ?= -X=main.version=$(VERSION) DOCKERDIR = docker -IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY) +BASE_IMAGE ?= alpine:latest +IMAGE ?= $(BINARY) TAG ?= $(VERSION) GITHEAD = $(shell git rev-parse --short HEAD) GITURL = $(shell git config --get remote.origin.url) @@ -42,8 +43,9 @@ ifndef GOPATH GOPATH := $(HOME)/go endif -PATH := $(GOPATH)/bin:$(PATH) -SHELL := env PATH=$(PATH) $(SHELL) +PATH := $(GOPATH)/bin:$(PATH) +SHELL := env PATH="$(PATH)" $(SHELL) +IMAGE_TAG := 
$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX) default: local @@ -66,19 +68,16 @@ docker: ${DOCKERDIR}/${DOCKERFILE} echo "Version ${VERSION}" echo "CDP tag ${CDP_TAG}" echo "git describe $(shell git describe --tags --always --dirty)" - docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" . + docker build --rm -t "$(IMAGE_TAG)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" --build-arg BASE_IMAGE="${BASE_IMAGE}" . indocker-race: - docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux" - -push: - docker push "$(IMAGE):$(TAG)$(CDP_TAG)" + docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.25.3 bash -c "make linux" mocks: GO111MODULE=on go generate ./... tools: - GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4 + GO111MODULE=on go get k8s.io/client-go@kubernetes-1.32.9 GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0 GO111MODULE=on go mod tidy diff --git a/README.md b/README.md index 9493115de..7d54e9fd9 100644 --- a/README.md +++ b/README.md @@ -17,6 +17,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as * Live volume resize without pod restarts (AWS EBS, PVC) * Database connection pooling with PGBouncer * Support fast in place major version upgrade. Supports global upgrade of all clusters. +* Pod protection during bootstrap phase and configurable maintenance windows * Restore and cloning Postgres clusters on AWS, GCS and Azure * Additionally logical backups to S3 or GCS bucket can be configured * Standby cluster from S3 or GCS WAL archive @@ -32,7 +33,7 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as * Streaming replication cluster via Patroni * Point-In-Time-Recovery with [pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) / -[WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo) +[WAL-G](https://github.com/wal-g/wal-g) or [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo) * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon), [pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html), [pgextwlist](https://github.com/dimitri/pgextwlist), @@ -41,12 +42,17 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as [decoderbufs](https://github.com/debezium/postgres-decoderbufs), [hypopg](https://github.com/HypoPG/hypopg), [pg_cron](https://github.com/citusdata/pg_cron), +[pg_repack](https://github.com/reorg/pg_repack), [pg_partman](https://github.com/pgpartman/pg_partman), [pg_stat_kcache](https://github.com/powa-team/pg_stat_kcache), +[pg_audit](https://github.com/pgaudit/pgaudit), +[pgfaceting](https://github.com/cybertec-postgresql/pgfaceting), [pgq](https://github.com/pgq/pgq), [pgvector](https://github.com/pgvector/pgvector), [plpgsql_check](https://github.com/okbob/plpgsql_check), +[plproxy](https://github.com/plproxy/plproxy), [postgis](https://postgis.net/), +[roaringbitmap](https://github.com/ChenHuajun/pg_roaringbitmap), [set_user](https://github.com/pgaudit/set_user) and [timescaledb](https://github.com/timescale/timescaledb) @@ -57,12 +63,12 @@ production for over five years. 
| Release | Postgres versions | K8s versions | Golang | | :-------- | :---------------: | :---------------: | :-----: | +| v1.15.1 | 13 → 17 | 1.27+ | 1.25.3 | | v1.14.0 | 13 → 17 | 1.27+ | 1.23.4 | | v1.13.0 | 12 → 16 | 1.27+ | 1.22.5 | | v1.12.0 | 11 → 16 | 1.27+ | 1.22.3 | | v1.11.0 | 11 → 16 | 1.27+ | 1.21.7 | | v1.10.1 | 10 → 15 | 1.21+ | 1.19.8 | -| v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 | ## Getting started diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index f4e2adf95..871640467 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: postgres-operator-ui -version: 1.14.0 -appVersion: 1.14.0 +version: 1.15.1 +appVersion: 1.15.1 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: diff --git a/charts/postgres-operator-ui/index.yaml b/charts/postgres-operator-ui/index.yaml index dab9594e9..20408aeaf 100644 --- a/charts/postgres-operator-ui/index.yaml +++ b/charts/postgres-operator-ui/index.yaml @@ -1,9 +1,32 @@ apiVersion: v1 entries: postgres-operator-ui: + - apiVersion: v2 + appVersion: 1.15.1 + created: "2025-12-11T12:44:25.470723322+01:00" + description: Postgres Operator UI provides a graphical interface for a convenient + database-as-a-service user experience + digest: 4bbb750934366038d692711f924151182b7be131b6822d011f5a4e51cf609482 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - ui + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator-ui + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-ui-1.15.1.tgz + version: 1.15.1 - apiVersion: v2 appVersion: 1.14.0 - created: "2024-12-23T11:26:07.721761867+01:00" + created: "2025-12-11T12:44:25.468680645+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c @@ -26,7 +49,7 @@ entries: version: 1.14.0 - apiVersion: v2 appVersion: 1.13.0 - created: "2024-12-23T11:26:07.719409282+01:00" + created: "2025-12-11T12:44:25.466716836+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8 @@ -49,7 +72,7 @@ entries: version: 1.13.0 - apiVersion: v2 appVersion: 1.12.2 - created: "2024-12-23T11:26:07.717202918+01:00" + created: "2025-12-11T12:44:25.464739895+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd @@ -72,7 +95,7 @@ entries: version: 1.12.2 - apiVersion: v2 appVersion: 1.11.0 - created: "2024-12-23T11:26:07.714792146+01:00" + created: "2025-12-11T12:44:25.462698399+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2 @@ -95,7 +118,7 @@ entries: version: 1.11.0 - apiVersion: v2 appVersion: 1.10.1 - created: "2024-12-23T11:26:07.712194397+01:00" + created: "2025-12-11T12:44:25.460357063+01:00" description: Postgres Operator UI provides a graphical interface for a 
convenient database-as-a-service user experience digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce @@ -116,27 +139,4 @@ entries: urls: - postgres-operator-ui-1.10.1.tgz version: 1.10.1 - - apiVersion: v2 - appVersion: 1.9.0 - created: "2024-12-23T11:26:07.723891496+01:00" - description: Postgres Operator UI provides a graphical interface for a convenient - database-as-a-service user experience - digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - ui - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator-ui - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-ui-1.9.0.tgz - version: 1.9.0 -generated: "2024-12-23T11:26:07.709192608+01:00" +generated: "2025-12-11T12:44:25.45732896+01:00" diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.15.1.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.15.1.tgz new file mode 100644 index 000000000..2eae35f92 Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.15.1.tgz differ diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.9.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.9.0.tgz deleted file mode 100644 index 7c04e3688..000000000 Binary files a/charts/postgres-operator-ui/postgres-operator-ui-1.9.0.tgz and /dev/null differ diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml index da3c4baaf..c308335b2 100644 --- a/charts/postgres-operator-ui/values.yaml +++ b/charts/postgres-operator-ui/values.yaml @@ -8,7 +8,7 @@ replicaCount: 1 image: registry: ghcr.io repository: zalando/postgres-operator-ui - tag: v1.14.0 + tag: v1.15.1 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. 
@@ -62,8 +62,6 @@ podAnnotations: extraEnvs: [] # Exemple of settings to make snapshot view working in the ui when using AWS - # - name: WALE_S3_ENDPOINT - # value: https+path://s3.us-east-1.amazonaws.com:443 # - name: SPILO_S3_BACKUP_PREFIX # value: spilo/ # - name: AWS_ACCESS_KEY_ID @@ -83,8 +81,6 @@ extraEnvs: # key: AWS_DEFAULT_REGION # - name: SPILO_S3_BACKUP_BUCKET # value: - # - name: "USE_AWS_INSTANCE_PROFILE" - # value: "true" # configure UI service service: diff --git a/charts/postgres-operator/Chart.yaml b/charts/postgres-operator/Chart.yaml index 35852c488..6f0d2e762 100644 --- a/charts/postgres-operator/Chart.yaml +++ b/charts/postgres-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: postgres-operator -version: 1.14.0 -appVersion: 1.14.0 +version: 1.15.1 +appVersion: 1.15.1 home: https://github.com/zalando/postgres-operator description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes keywords: diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index 058769acf..58e84bd2f 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -68,7 +68,7 @@ spec: type: string docker_image: type: string - default: "ghcr.io/zalando/spilo-17:4.0-p2" + default: "ghcr.io/zalando/spilo-17:4.0-p3" enable_crd_registration: type: boolean default: true @@ -510,7 +510,7 @@ spec: pattern: '^(\d+m|\d+(\.\d{1,3})?)$' logical_backup_docker_image: type: string - default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" + default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1" logical_backup_google_application_credentials: type: string logical_backup_job_prefix: diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index 8083e5e1d..667c58efa 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -278,7 +278,6 @@ spec: items: type: string weight: - format: int32 type: integer requiredDuringSchedulingIgnoredDuringExecution: type: object diff --git a/charts/postgres-operator/index.yaml b/charts/postgres-operator/index.yaml index 4da98d70a..7128b8eb9 100644 --- a/charts/postgres-operator/index.yaml +++ b/charts/postgres-operator/index.yaml @@ -1,9 +1,53 @@ apiVersion: v1 entries: postgres-operator: + - apiVersion: v2 + appVersion: 1.15.1 + created: "2025-12-17T14:48:33.832345061+01:00" + description: Postgres Operator creates and manages PostgreSQL clusters running + in Kubernetes + digest: 9f3edc3d796105c02c04eaae28a78e58fb08c1847a9de012245fd6ac2c0d2c00 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-1.15.1.tgz + version: 1.15.1 + - apiVersion: v2 + appVersion: 1.15.0 + created: "2025-12-17T14:48:33.826117296+01:00" + description: Postgres Operator creates and manages PostgreSQL clusters running + in Kubernetes + digest: 002dd47647bf51fbba023bd1762d807be478cf37de7a44b80cd01ac1f20bd94a + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator + sources: + - 
https://github.com/zalando/postgres-operator + urls: + - postgres-operator-1.15.0.tgz + version: 1.15.0 - apiVersion: v2 appVersion: 1.14.0 - created: "2024-12-23T11:25:32.596716566+01:00" + created: "2025-12-17T14:48:33.819729144+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8 @@ -25,7 +69,7 @@ entries: version: 1.14.0 - apiVersion: v2 appVersion: 1.13.0 - created: "2024-12-23T11:25:32.591136261+01:00" + created: "2025-12-17T14:48:33.81038602+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef @@ -47,7 +91,7 @@ entries: version: 1.13.0 - apiVersion: v2 appVersion: 1.12.2 - created: "2024-12-23T11:25:32.585419709+01:00" + created: "2025-12-17T14:48:33.803256825+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: 65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8 @@ -69,7 +113,7 @@ entries: version: 1.12.2 - apiVersion: v2 appVersion: 1.11.0 - created: "2024-12-23T11:25:32.580077286+01:00" + created: "2025-12-17T14:48:33.797369053+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9 @@ -91,7 +135,7 @@ entries: version: 1.11.0 - apiVersion: v2 appVersion: 1.10.1 - created: "2024-12-23T11:25:32.574641578+01:00" + created: "2025-12-17T14:48:33.791368349+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c @@ -111,26 +155,4 @@ entries: urls: - postgres-operator-1.10.1.tgz version: 1.10.1 - - apiVersion: v2 - appVersion: 1.9.0 - created: "2024-12-23T11:25:32.604748814+01:00" - description: Postgres Operator creates and manages PostgreSQL clusters running - in Kubernetes - digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276 - home: https://github.com/zalando/postgres-operator - keywords: - - postgres - - operator - - cloud-native - - patroni - - spilo - maintainers: - - email: opensource@zalando.de - name: Zalando - name: postgres-operator - sources: - - https://github.com/zalando/postgres-operator - urls: - - postgres-operator-1.9.0.tgz - version: 1.9.0 -generated: "2024-12-23T11:25:32.568598763+01:00" +generated: "2025-12-17T14:48:33.785159183+01:00" diff --git a/charts/postgres-operator/postgres-operator-1.15.0.tgz b/charts/postgres-operator/postgres-operator-1.15.0.tgz new file mode 100644 index 000000000..e029732ae Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.15.0.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.15.1.tgz b/charts/postgres-operator/postgres-operator-1.15.1.tgz new file mode 100644 index 000000000..7dce6f87d Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.15.1.tgz differ diff --git a/charts/postgres-operator/postgres-operator-1.9.0.tgz b/charts/postgres-operator/postgres-operator-1.9.0.tgz deleted file mode 100644 index 8106bcf15..000000000 Binary files a/charts/postgres-operator/postgres-operator-1.9.0.tgz and /dev/null differ diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index bf94b63d0..426e4267d 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ 
-1,7 +1,7 @@ image: registry: ghcr.io repository: zalando/postgres-operator - tag: v1.14.0 + tag: v1.15.1 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -38,7 +38,7 @@ configGeneral: # etcd connection string for Patroni. Empty uses K8s-native DCS. etcd_host: "" # Spilo docker image - docker_image: ghcr.io/zalando/spilo-17:4.0-p2 + docker_image: ghcr.io/zalando/spilo-17:4.0-p3 # key name for annotation to ignore globally configured instance limits # ignore_instance_limits_annotation_key: "" @@ -364,7 +364,7 @@ configLogicalBackup: # logical_backup_memory_request: "" # image for pods of the logical backup job (example runs pg_dumpall) - logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1" # path of google cloud service account json file # logical_backup_google_application_credentials: "" diff --git a/delivery.yaml b/delivery.yaml index 7eacd769b..9971d4070 100644 --- a/delivery.yaml +++ b/delivery.yaml @@ -1,72 +1,108 @@ version: "2017-09-20" +allow_concurrent_steps: true + +build_env: &BUILD_ENV + PYTHON_BASE_IMAGE: container-registry.zalando.net/library/python-3.11-slim + ALPINE_BASE_IMAGE: container-registry.zalando.net/library/alpine-3 + MULTI_ARCH_REGISTRY: container-registry-test.zalando.net/acid + pipeline: - - id: build-postgres-operator - type: script - vm_config: - type: linux - size: large - image: cdp-runtime/go - cache: - paths: - - /go/pkg/mod # pkg cache for Go modules - - ~/.cache/go-build # Go build cache - commands: - - desc: Run unit tests - cmd: | - make deps mocks test + - id: build-postgres-operator + env: + <<: *BUILD_ENV + type: script + vm_config: + type: linux + size: large + image: cdp-runtime/go + cache: + paths: + - /go/pkg/mod # pkg cache for Go modules + - ~/.cache/go-build # Go build cache + commands: + - desc: Run unit tests + cmd: | + make deps mocks test - - desc: Build Docker image - cmd: | - IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} - if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]] - then - IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator - else - IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-test - fi - export IMAGE - make docker push + - desc: Build Docker image + cmd: | + if [ -z ${CDP_SOURCE_BRANCH} ]; then + IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator + else + IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-test + fi - - id: build-operator-ui - type: script - vm_config: - type: linux + docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use + docker buildx build --platform "linux/amd64,linux/arm64" \ + --build-arg BASE_IMAGE="${ALPINE_BASE_IMAGE}" \ + -t "${IMAGE}:${CDP_BUILD_VERSION}" \ + -f docker/Dockerfile \ + --push . 
- commands: - - desc: 'Prepare environment' - cmd: | - apt-get update - apt-get install -y build-essential + if [ -z ${CDP_SOURCE_BRANCH} ]; then + cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION} + fi - - desc: 'Compile JavaScript app' - cmd: | - cd ui - make appjs + - id: build-operator-ui + env: + <<: *BUILD_ENV + type: script + vm_config: + type: linux - - desc: 'Build and push Docker image' - cmd: | - cd ui - IS_PR_BUILD=${CDP_PULL_REQUEST_NUMBER+"true"} - if [[ ${CDP_TARGET_BRANCH} == "master" && ${IS_PR_BUILD} != "true" ]] - then - IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui - else - IMAGE=registry-write.opensource.zalan.do/acid/postgres-operator-ui-test - fi - export IMAGE - make docker - make push + commands: + - desc: 'Prepare environment' + cmd: | + apt-get update + apt-get install -y build-essential - - id: build-logical-backup - type: script - vm_config: - type: linux + - desc: 'Compile JavaScript app' + cmd: | + cd ui + make appjs - commands: - - desc: Build image - cmd: | - cd logical-backup - export TAG=$(git describe --tags --always --dirty) - IMAGE="registry-write.opensource.zalan.do/acid/logical-backup" - docker build --rm -t "$IMAGE:$TAG$CDP_TAG" . - docker push "$IMAGE:$TAG$CDP_TAG" + - desc: 'Build and push Docker image' + cmd: | + cd ui + if [ -z ${CDP_SOURCE_BRANCH} ]; then + IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-ui + else + IMAGE=${MULTI_ARCH_REGISTRY}/postgres-operator-ui-test + fi + + make appjs + docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use + docker buildx build --platform linux/amd64,linux/arm64 \ + --build-arg BASE_IMAGE="${PYTHON_BASE_IMAGE}" \ + -t ${IMAGE}:${CDP_BUILD_VERSION} \ + --push . + + if [ -z ${CDP_SOURCE_BRANCH} ]; then + cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION} + fi + + - id: build-logical-backup + env: + <<: *BUILD_ENV + type: script + vm_config: + type: linux + + commands: + - desc: Build image + cmd: | + cd logical-backup + if [ -z ${CDP_SOURCE_BRANCH} ]; then + IMAGE=${MULTI_ARCH_REGISTRY}/logical-backup + else + IMAGE=${MULTI_ARCH_REGISTRY}/logical-backup-test + fi + + docker buildx create --config /etc/cdp-buildkitd.toml --driver-opt network=host --bootstrap --use + docker buildx build --platform linux/amd64,linux/arm64 \ + -t ${IMAGE}:${CDP_BUILD_VERSION} \ + --push . + + if [ -z ${CDP_SOURCE_BRANCH} ]; then + cdp-promote-image ${IMAGE}:${CDP_BUILD_VERSION} + fi diff --git a/docker/DebugDockerfile b/docker/DebugDockerfile index 18cb631fe..c44002984 100644 --- a/docker/DebugDockerfile +++ b/docker/DebugDockerfile @@ -1,4 +1,4 @@ -FROM golang:1.23-alpine +FROM golang:1.25-alpine LABEL maintainer="Team ACID @ Zalando " # We need root certificates to deal with teams api over https diff --git a/docker/Dockerfile b/docker/Dockerfile index 1fd2020d8..9eef4e68c 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ -ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest -FROM golang:1.23-alpine AS builder +ARG BASE_IMAGE=alpine:latest +FROM golang:1.25-alpine AS builder ARG VERSION=latest COPY . 
/go/src/github.com/zalando/postgres-operator diff --git a/docker/build_operator.sh b/docker/build_operator.sh index 6c1817b1b..5abe56666 100644 --- a/docker/build_operator.sh +++ b/docker/build_operator.sh @@ -13,7 +13,7 @@ apt-get install -y wget ( cd /tmp - wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz + wget -q "https://storage.googleapis.com/golang/go1.25.3.linux-${arch}.tar.gz" -O go.tar.gz tar -xf go.tar.gz mv go /usr/local ln -s /usr/local/go/bin/go /usr/bin/go diff --git a/docs/administrator.md b/docs/administrator.md index 4f72870f9..d7b5a9ecc 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -195,12 +195,14 @@ from numerous escape characters in the latter log entry, view it in CLI with used internally in K8s. The StatefulSet is replaced if the following properties change: + - annotations - volumeClaimTemplates - template volumes The StatefulSet is replaced and a rolling updates is triggered if the following properties differ between the old and new state: + - container name, ports, image, resources, env, envFrom, securityContext and volumeMounts - template labels, annotations, service account, securityContext, affinity, priority class and termination grace period @@ -384,7 +386,7 @@ exceptions: The interval of days can be set with `password_rotation_interval` (default `90` = 90 days, minimum 1). On each rotation the user name and password values are replaced in the K8s secret. They belong to a newly created user named after -the original role plus rotation date in YYMMDD format. All priviliges are +the original role plus rotation date in YYMMDD format. All privileges are inherited meaning that migration scripts should still grant and revoke rights against the original role. The timestamp of the next rotation (in RFC 3339 format, UTC timezone) is written to the secret as well. Note, if the rotation @@ -564,7 +566,7 @@ manifest affinity. ``` If `node_readiness_label_merge` is set to `"OR"` (default) the readiness label -affinty will be appended with its own expressions block: +affinity will be appended with its own expressions block: ```yaml affinity: @@ -620,22 +622,34 @@ By default the topology key for the pod anti affinity is set to `kubernetes.io/hostname`, you can set another topology key e.g. `topology.kubernetes.io/zone`. See [built-in node labels](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#interlude-built-in-node-labels) for available topology keys. -## Pod Disruption Budget +## Pod Disruption Budgets -By default the operator uses a PodDisruptionBudget (PDB) to protect the cluster -from voluntarily disruptions and hence unwanted DB downtime. The `MinAvailable` -parameter of the PDB is set to `1` which prevents killing masters in single-node -clusters and/or the last remaining running instance in a multi-node cluster. +By default the operator creates two PodDisruptionBudgets (PDB) to protect the cluster +from voluntary disruptions and hence unwanted DB downtime: a so-called primary PDB and +a PDB for critical operations. + +### Primary PDB +The `MinAvailable` parameter of this PDB is set to `1` and, if `pdb_master_label_selector` +is enabled, the label selector includes the `spilo-role=master` condition, which prevents killing +masters in single-node clusters and/or the last remaining running instance in a multi-node +cluster.
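For orientation, here is a rough, hand-written sketch of what the primary PDB could look like for a hypothetical cluster named `acid-minimal-cluster`, assuming the default `pdb_name_format` and `cluster_name_label` and an enabled `pdb_master_label_selector`; the actual object is generated by the operator and may differ per configuration:

```yaml
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: postgres-acid-minimal-cluster-pdb
spec:
  # keep at least the master pod safe from voluntary disruptions
  minAvailable: 1
  selector:
    matchLabels:
      cluster-name: acid-minimal-cluster
      spilo-role: master  # added only when pdb_master_label_selector is enabled
```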
+ +### PDB for critical operations +The `MinAvailable` parameter of this PDB is equal to the `numberOfInstances` set in the +cluster manifest, while the label selector includes the `critical-operation=true` condition. This +allows protecting all pods of a cluster, given they are labeled accordingly. +For example, the operator labels all Spilo pods with `critical-operation=true` during the major +version upgrade run. You may want to protect cluster pods during other critical operations +by assigning the label to pods yourself or using other means of automation. The PDB is only relaxed in two scenarios: * If a cluster is scaled down to `0` instances (e.g. for draining nodes) * If the PDB is disabled in the configuration (`enable_pod_disruption_budget`) -The PDB is still in place having `MinAvailable` set to `0`. If enabled it will -be automatically set to `1` on scale up. Disabling PDBs helps avoiding blocking -Kubernetes upgrades in managed K8s environments at the cost of prolonged DB -downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384) +The PDBs are still in place having `MinAvailable` set to `0`. Disabling PDBs +helps avoid blocking Kubernetes upgrades in managed K8s environments at the +cost of prolonged DB downtime. See PR [#384](https://github.com/zalando/postgres-operator/pull/384) for the use case. ## Add cluster-specific labels @@ -886,6 +900,7 @@ services: There are multiple options to specify service annotations that will be merged with each other and override in the following order (where latter take precedence): + 1. Default annotations if LoadBalancer is enabled 2. Globally configured `custom_service_annotations` 3. `serviceAnnotations` specified in the cluster manifest @@ -1128,7 +1143,7 @@ metadata: iam.gke.io/gcp-service-account: @.iam.gserviceaccount.com ``` -2. Specify the new custom service account in your [operator paramaters](./reference/operator_parameters.md) +2. Specify the new custom service account in your [operator parameters](./reference/operator_parameters.md) If using manual deployment or kustomize, this is done by setting `pod_service_account_name` in your configuration file specified in the @@ -1483,7 +1498,7 @@ make docker # build in image in minikube docker env eval $(minikube docker-env) -docker build -t ghcr.io/zalando/postgres-operator-ui:v1.13.0 . +docker build -t ghcr.io/zalando/postgres-operator-ui:v1.15.1 . # apply UI manifests next to a running Postgres Operator kubectl apply -f manifests/ diff --git a/docs/developer.md b/docs/developer.md index c006aded0..b5f59c246 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -16,7 +16,7 @@ under the ~/go/src sub directories. Given the schema above, the Postgres Operator source code located at `github.com/zalando/postgres-operator` should be put at --`~/go/src/github.com/zalando/postgres-operator`. +`~/go/src/github.com/zalando/postgres-operator`. ```bash export GOPATH=~/go @@ -43,7 +43,7 @@ Build the operator with the `make docker` command. You may define the TAG variable to assign an explicit tag to your Docker image and the IMAGE to set the image name.
By default, the tag is computed with `git describe --tags --always --dirty` and the image is -`registry.opensource.zalan.do/acid/postgres-operator` +`ghcr.io/zalando/postgres-operator` ```bash export TAG=$(git describe --tags --always --dirty) @@ -72,7 +72,7 @@ make docker # kind make docker -kind load docker-image registry.opensource.zalan.do/acid/postgres-operator:${TAG} --name +kind load docker-image ghcr.io/zalando/postgres-operator:${TAG} --name ``` Then create a new Postgres Operator deployment. @@ -105,6 +105,7 @@ and K8s-like APIs for its custom resource definitions, namely the Postgres CRD and the operator CRD. The usage of the code generation follows conventions from the K8s community. Relevant scripts live in the `hack` directory: + * `update-codegen.sh` triggers code generation for the APIs defined in `pkg/apis/acid.zalan.do/`, * `verify-codegen.sh` checks if the generated code is up-to-date (to be used within CI). @@ -112,6 +113,7 @@ The `/pkg/generated/` contains the resultant code. To make these scripts work, you may need to `export GOPATH=$(go env GOPATH)` References for code generation are: + * [Relevant pull request](https://github.com/zalando/postgres-operator/pull/369) See comments there for minor issues that can sometimes broke the generation process. * [Code generator source code](https://github.com/kubernetes/code-generator) @@ -274,10 +276,10 @@ Examples for fake K8s objects can be found in: The operator provides reference end-to-end (e2e) tests to ensure various infrastructure parts work smoothly together. The test code is available at `e2e/tests`. -The special `registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build. +The special `ghcr.io/zalando/postgres-operator-e2e-tests-runner` image is used to run the tests. The container mounts the local `e2e/tests` directory at runtime, so whatever you modify in your local copy of the tests will be executed by a test runner. By maintaining a separate test runner image we avoid the need to re-build the e2e test image on every build. -Each e2e execution tests a Postgres Operator image built from the current git branch. The test -runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/), +Each e2e execution tests a Postgres Operator image built from the current git branch. +The test runner creates a new local K8s cluster using [kind](https://kind.sigs.k8s.io/), utilizes provided manifest examples, and runs e2e tests contained in the `tests` folder. The K8s API client in the container connects to the `kind` cluster via the standard Docker `bridge` network. The kind cluster is deleted if tests @@ -315,6 +317,7 @@ precedence. 
Update the following Go files that obtain the configuration parameter from the manifest files: + * [operator_configuration_type.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go) * [operator_config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/controller/operator_config.go) * [config.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config.go) @@ -323,6 +326,7 @@ Postgres manifest parameters are defined in the [api package](https://github.com The operator behavior has to be implemented at least in [k8sres.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres.go). Validation of CRD parameters is controlled in [crds.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/crds.go). Please, reflect your changes in tests, for example in: + * [config_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/util/config/config_test.go) * [k8sres_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/cluster/k8sres_test.go) * [util_test.go](https://github.com/zalando/postgres-operator/blob/master/pkg/apis/acid.zalan.do/v1/util_test.go) @@ -330,6 +334,7 @@ Please, reflect your changes in tests, for example in: ### Updating manifest files For the CRD-based configuration, please update the following files: + * the default [OperatorConfiguration](https://github.com/zalando/postgres-operator/blob/master/manifests/postgresql-operator-default-configuration.yaml) * the CRD's [validation](https://github.com/zalando/postgres-operator/blob/master/manifests/operatorconfiguration.crd.yaml) * the CRD's validation in the [Helm chart](https://github.com/zalando/postgres-operator/blob/master/charts/postgres-operator/crds/operatorconfigurations.yaml) @@ -342,6 +347,7 @@ Last but no least, update the [ConfigMap](https://github.com/zalando/postgres-op Finally, add a section for each new configuration option and/or cluster manifest parameter in the reference documents: + * [config reference](reference/operator_parameters.md) * [manifest reference](reference/cluster_manifest.md) diff --git a/docs/quickstart.md b/docs/quickstart.md index f080bd567..fa16d1813 100644 --- a/docs/quickstart.md +++ b/docs/quickstart.md @@ -10,7 +10,7 @@ hence set it up first. For local tests we recommend to use one of the following solutions: * [minikube](https://github.com/kubernetes/minikube/releases), which creates a - single-node K8s cluster inside a VM (requires KVM or VirtualBox), + K8s cluster inside a container or VM (requires Docker, KVM, Hyper-V, HyperKit, VirtualBox, or similar), * [kind](https://kind.sigs.k8s.io/) and [k3d](https://k3d.io), which allows creating multi-nodes K8s clusters running on Docker (requires Docker) @@ -20,7 +20,7 @@ This quickstart assumes that you have started minikube or created a local kind cluster. Note that you can also use built-in K8s support in the Docker Desktop for Mac to follow the steps of this tutorial. You would have to replace `minikube start` and `minikube delete` with your launch actions for the Docker -built-in K8s support. +Desktop built-in K8s support. ## Configuration Options @@ -230,7 +230,7 @@ kubectl delete postgresql acid-minimal-cluster ``` This should remove the associated StatefulSet, database Pods, Services and -Endpoints. The PersistentVolumes are released and the PodDisruptionBudget is +Endpoints. The PersistentVolumes are released and the PodDisruptionBudgets are deleted. 
Secrets however are not deleted and backups will remain in place. When deleting a cluster while it is still starting up or got stuck during that diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index 8d02ee7d8..ab0353202 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -116,9 +116,9 @@ These parameters are grouped directly under the `spec` key in the manifest. * **maintenanceWindows** a list which defines specific time frames when certain maintenance operations - are allowed. So far, it is only implemented for automatic major version - upgrades. Accepted formats are "01:00-06:00" for daily maintenance windows or - "Sat:00:00-04:00" for specific days, with all times in UTC. + such as automatic major upgrades or master pod migration are allowed. Accepted formats + are "01:00-06:00" for daily maintenance windows or "Sat:00:00-04:00" for specific + days, with all times in UTC. * **users** a map of usernames to user flags for the users that should be created in the @@ -247,7 +247,7 @@ These parameters are grouped directly under the `spec` key in the manifest. [kubernetes volumeSource](https://godoc.org/k8s.io/api/core/v1#VolumeSource). It allows you to mount existing PersistentVolumeClaims, ConfigMaps and Secrets inside the StatefulSet. Also an `emptyDir` volume can be shared between initContainer and statefulSet. - Additionaly, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example). + Additionally, you can provide a `SubPath` for volume mount (a file in a configMap source volume, for example). Set `isSubPathExpr` to true if you want to include [API environment variables](https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath-expanded-environment). You can also specify in which container the additional Volumes will be mounted with the `targetContainers` array option. If `targetContainers` is empty, additional volumes will be mounted only in the `postgres` container. @@ -257,7 +257,7 @@ These parameters are grouped directly under the `spec` key in the manifest. ## Prepared Databases The operator can create databases with default owner, reader and writer roles -without the need to specifiy them under `users` or `databases` sections. Those +without the need to specify them under `users` or `databases` sections. Those parameters are grouped under the `preparedDatabases` top-level key. For more information, see [user docs](../user.md#prepared-databases-with-roles-and-default-privileges). diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index 3bd9e44f7..5662d6b8e 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -107,8 +107,13 @@ Those are top-level keys, containing both leaf keys and groups. * **kubernetes_use_configmaps** Select if setup uses endpoints (default), or configmaps to manage leader when DCS is kubernetes (not etcd or similar). In OpenShift it is not possible to - use endpoints option, and configmaps is required. By default, - `kubernetes_use_configmaps: false`, meaning endpoints will be used. + use endpoints option, and configmaps is required. Starting with K8s 1.33, + endpoints are marked as deprecated. It's recommended to switch to config maps + instead. However, before doing so, make sure you scale the Postgres cluster down to just + one primary pod (e.g. using the `max_instances` option). Otherwise, you risk + running into a split-brain scenario.
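As a minimal sketch of the recommended switch, assuming the CRD-based `OperatorConfiguration` with the default manifest name used elsewhere in this diff, enabling config maps for leader election would look roughly like this:

```yaml
apiVersion: "acid.zalan.do/v1"
kind: OperatorConfiguration
metadata:
  name: postgresql-operator-default-configuration
configuration:
  # use config maps instead of endpoints for Patroni leader election;
  # scale each cluster down to one primary pod first to avoid split-brain
  kubernetes_use_configmaps: true
```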
+ By default, `kubernetes_use_configmaps: false`, meaning endpoints will be used. + Starting from v1.16.0 the default will be changed to `true`. * **docker_image** Spilo Docker image for Postgres instances. For production, don't rely on the @@ -209,7 +214,7 @@ under the `users` key. For all `LOGIN` roles that are not database owners the operator can rotate credentials in the corresponding K8s secrets by replacing the username and password. This means, new users will be added on each rotation inheriting - all priviliges from the original roles. The rotation date (in YYMMDD format) + all privileges from the original roles. The rotation date (in YYMMDD format) is appended to the names of the new user. The timestamp of the next rotation is written to the secret. The default is `false`. @@ -334,13 +339,13 @@ configuration they are grouped under the `kubernetes` key. pod namespace). * **pdb_name_format** - defines the template for PDB (Pod Disruption Budget) names created by the + defines the template for the primary PDB (Pod Disruption Budget) name created by the operator. The default is `postgres-{cluster}-pdb`, where `{cluster}` is replaced by the cluster name. Only the `{cluster}` placeholders is allowed in the template. * **pdb_master_label_selector** - By default the PDB will match the master role hence preventing nodes to be + By default the primary PDB will match the master role hence preventing nodes from being drained if the node_readiness_label is not used. If this option if set to `false` the `spilo-role=master` selector will not be added to the PDB. @@ -552,7 +557,7 @@ configuration they are grouped under the `kubernetes` key. pods with `InitialDelaySeconds: 6`, `PeriodSeconds: 10`, `TimeoutSeconds: 5`, `SuccessThreshold: 1` and `FailureThreshold: 3`. When enabling readiness probes it is recommended to switch the `pod_management_policy` to `parallel` - to avoid unneccesary waiting times in case of multiple instances failing. + to avoid unnecessary waiting times in case of multiple instances failing. The default is `false`. * **storage_resize_mode** @@ -701,7 +706,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. replaced by the cluster name, `{namespace}` is replaced with the namespace and `{hostedzone}` is replaced with the hosted zone (the value of the `db_hosted_zone` parameter). The `{team}` placeholder can still be used, - although it is not recommened because the team of a cluster can change. + although it is not recommended because the team of a cluster can change. If the cluster name starts with the `teamId` it will also be part of the DNS, aynway. No other placeholders are allowed! @@ -720,7 +725,7 @@ In the CRD-based configuration they are grouped under the `load_balancer` key. is replaced by the cluster name, `{namespace}` is replaced with the namespace and `{hostedzone}` is replaced with the hosted zone (the value of the `db_hosted_zone` parameter). The `{team}` placeholder can still be used, - although it is not recommened because the team of a cluster can change. + although it is not recommended because the team of a cluster can change. If the cluster name starts with the `teamId` it will also be part of the DNS, aynway. No other placeholders are allowed! @@ -819,7 +824,7 @@ grouped under the `logical_backup` key. runs `pg_dumpall` on a replica if possible and uploads compressed results to an S3 bucket under the key `////logical_backups`. The default image is the same image built with the Zalando-internal CI pipeline. 
Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" + pipeline. Default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1" * **logical_backup_google_application_credentials** Specifies the path of the google cloud service account json file. Default is empty. diff --git a/docs/user.md b/docs/user.md index c63e43f57..c1a7c7d45 100644 --- a/docs/user.md +++ b/docs/user.md @@ -900,7 +900,7 @@ the PostgreSQL version between source and target cluster has to be the same. To start a cluster as standby, add the following `standby` section in the YAML file. You can stream changes from archived WAL files (AWS S3 or Google Cloud -Storage) or from a remote primary. Only one option can be specfied in the +Storage) or from a remote primary. Only one option can be specified in the manifest: ```yaml @@ -911,7 +911,7 @@ spec: For GCS, you have to define STANDBY_GOOGLE_APPLICATION_CREDENTIALS as a [custom pod environment variable](administrator.md#custom-pod-environment-variables). -It is not set from the config to allow for overridding. +It is not set from the config to allow for overriding. ```yaml spec: @@ -1282,7 +1282,7 @@ minutes if the certificates have changed and reloads postgres accordingly. ### TLS certificates for connection pooler By default, the pgBouncer image generates its own TLS certificate like Spilo. -When the `tls` section is specfied in the manifest it will be used for the +When the `tls` section is specified in the manifest it will be used for the connection pooler pod(s) as well. The security context options are hard coded to `runAsUser: 100` and `runAsGroup: 101`. The `fsGroup` will be the same like for Spilo. diff --git a/e2e/Dockerfile b/e2e/Dockerfile index cfbc9eff7..98bbf755a 100644 --- a/e2e/Dockerfile +++ b/e2e/Dockerfile @@ -1,27 +1,20 @@ # An image to run e2e tests. # The image does not include the tests; all necessary files are bind-mounted when a container starts. 
-FROM ubuntu:20.04 +FROM python:3.11-slim LABEL maintainer="Team ACID @ Zalando " ENV TERM xterm-256color -COPY requirements.txt ./ - -RUN apt-get update \ - && apt-get install --no-install-recommends -y \ - python3 \ - python3-setuptools \ - python3-pip \ - curl \ - vim \ - && pip3 install --no-cache-dir -r requirements.txt \ - && curl -LO https://storage.googleapis.com/kubernetes-release/release/v1.24.3/bin/linux/amd64/kubectl \ +RUN apt-get -qq -y update \ + # https://www.psycopg.org/docs/install.html#psycopg-vs-psycopg-binary + && apt-get -qq -y install --no-install-recommends curl vim python3-dev \ + && curl -LO https://dl.k8s.io/release/v1.32.9/bin/linux/amd64/kubectl \ && chmod +x ./kubectl \ && mv ./kubectl /usr/local/bin/kubectl \ - && apt-get clean \ + && apt-get -qq -y clean \ && rm -rf /var/lib/apt/lists/* -# working line -# python3 -m unittest discover -v --failfast -k test_e2e.EndToEndTestCase.test_lazy_spilo_upgrade --start-directory tests -ENTRYPOINT ["python3", "-m", "unittest"] -CMD ["discover","-v","--failfast","--start-directory","/tests"] \ No newline at end of file +COPY requirements.txt ./ +RUN pip install -r ./requirements.txt + +CMD ["python", "-m", "unittest", "discover", "-v", "--failfast", "--start-directory", "/tests"] \ No newline at end of file diff --git a/e2e/Makefile b/e2e/Makefile index 52d24e9e5..5fa0de471 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -11,7 +11,7 @@ endif LOCAL_BUILD_FLAGS ?= $(BUILD_FLAGS) LDFLAGS ?= -X=main.version=$(VERSION) -IMAGE ?= registry.opensource.zalan.do/acid/$(BINARY) +IMAGE ?= ghcr.io/zalando/$(BINARY) VERSION ?= $(shell git describe --tags --always --dirty) TAG ?= $(VERSION) GITHEAD = $(shell git rev-parse --short HEAD) @@ -46,7 +46,7 @@ tools: # install pinned version of 'kind' # go install must run outside of a dir with a (module-based) Go project ! 
# otherwise go install updates project's dependencies and/or behaves differently - cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0 + cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.27.0 e2etest: tools copy clean ./run.sh main diff --git a/e2e/exec_into_env.sh b/e2e/exec_into_env.sh index 59acbeeb4..a46efecbd 100755 --- a/e2e/exec_into_env.sh +++ b/e2e/exec_into_env.sh @@ -2,8 +2,8 @@ export cluster_name="postgres-operator-e2e-tests" export kubeconfig_path="/tmp/kind-config-${cluster_name}" -export operator_image="registry.opensource.zalan.do/acid/postgres-operator:latest" -export e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4" +export operator_image="ghcr.io/zalando/postgres-operator:latest" +export e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest" docker run -it --entrypoint /bin/bash --network=host -e "TERM=xterm-256color" \ --mount type=bind,source="$(readlink -f ${kubeconfig_path})",target=/root/.kube/config \ diff --git a/e2e/requirements.txt b/e2e/requirements.txt index d904585be..30b656552 100644 --- a/e2e/requirements.txt +++ b/e2e/requirements.txt @@ -1,3 +1,3 @@ -kubernetes==29.2.0 +kubernetes==31.0.0 timeout_decorator==0.5.0 -pyyaml==6.0.1 +pyyaml==6.0.3 diff --git a/e2e/run.sh b/e2e/run.sh index d289cb3f4..b0f13f92e 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -9,7 +9,7 @@ IFS=$'\n\t' readonly cluster_name="postgres-operator-e2e-tests" readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" -readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4" +readonly e2e_test_runner_image="ghcr.io/zalando/postgres-operator-e2e-tests-runner:latest" export GOPATH=${GOPATH-~/go} export PATH=${GOPATH}/bin:$PATH @@ -19,11 +19,11 @@ echo "Kubeconfig path: ${kubeconfig_path}" function pull_images(){ operator_tag=$(git describe --tags --always --dirty) - if [[ -z $(docker images -q registry.opensource.zalan.do/acid/postgres-operator:${operator_tag}) ]] + if [[ -z $(docker images -q ghcr.io/zalando/postgres-operator:${operator_tag}) ]] then - docker pull registry.opensource.zalan.do/acid/postgres-operator:latest + docker pull ghcr.io/zalando/postgres-operator:latest fi - operator_image=$(docker images --filter=reference="registry.opensource.zalan.do/acid/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1) + operator_image=$(docker images --filter=reference="ghcr.io/zalando/postgres-operator" --format "{{.Repository}}:{{.Tag}}" | head -1) } function start_kind(){ diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 04c6465c9..f473b5cc4 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -14,7 +14,7 @@ from kubernetes.client.rest import ApiException SPILO_CURRENT = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" SPILO_LAZY = "registry.opensource.zalan.do/acid/spilo-17-e2e:0.4" -SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p2" +SPILO_FULL_IMAGE = "ghcr.io/zalando/spilo-17:4.0-p3" def to_selector(labels): return ",".join(["=".join(lbl) for lbl in labels.items()]) @@ -1003,7 +1003,8 @@ class EndToEndTestCase(unittest.TestCase): "Origin": 2, "IsDbOwner": False, "Deleted": False, - "Rotated": False + "Rotated": False, + "Degraded": False, }) return True except: @@ -1187,7 +1188,7 @@ class EndToEndTestCase(unittest.TestCase): Test major version upgrade: with full upgrade, maintenance window, and annotation """ def 
check_version(): - p = k8s.patroni_rest("acid-upgrade-test-0", "") + p = k8s.patroni_rest("acid-upgrade-test-0", "") or {} version = p.get("server_version", 0) // 10000 return version @@ -1237,7 +1238,7 @@ class EndToEndTestCase(unittest.TestCase): # should not upgrade because current time is not in maintenanceWindow current_time = datetime.now() maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}" - pg_patch_version_15 = { + pg_patch_version_15_outside_mw = { "spec": { "postgresql": { "version": "15" @@ -1248,10 +1249,10 @@ class EndToEndTestCase(unittest.TestCase): } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_outside_mw) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) + # no pod replacement outside of the maintenance window k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) self.eventuallyEqual(check_version, 14, "Version should not be upgraded") @@ -1259,12 +1260,12 @@ class EndToEndTestCase(unittest.TestCase): second_annotations = get_annotations() self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set") - # change the version again to trigger operator sync + # change maintenanceWindows to current maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" - pg_patch_version_16 = { + pg_patch_version_15_in_mw = { "spec": { "postgresql": { - "version": "16" + "version": "15" }, "maintenanceWindows": [ maintenance_window_current @@ -1273,13 +1274,13 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) + k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16") + self.eventuallyEqual(check_version, 15, "Version should be upgraded from 14 to 15") # check if annotation for last upgrade's success is updated after second upgrade third_annotations = get_annotations() @@ -1303,20 +1304,20 @@ class EndToEndTestCase(unittest.TestCase): "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=master,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) - k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, 16, "Version should not be 
upgraded because annotation for last upgrade's failure is set") - - # change the version back to 15 and should remove failure annotation - k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) - self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + self.eventuallyEqual(check_version, 15, "Version should not be upgraded because annotation for last upgrade's failure is set") + # change the version back to 15 and should remove failure annotation + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15_in_mw) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + + k8s.wait_for_pod_start('spilo-role=master,' + cluster_label) + k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) + + self.eventuallyEqual(check_version, 15, "Version should not be upgraded from 15") fourth_annotations = get_annotations() self.assertIsNone(fourth_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure is not removed") @@ -1752,9 +1753,13 @@ class EndToEndTestCase(unittest.TestCase): Test password rotation and removal of users due to retention policy ''' k8s = self.k8s + cluster_label = 'application=spilo,cluster-name=acid-minimal-cluster' leader = k8s.get_cluster_leader_pod() today = date.today() + # remember number of secrets to make sure it stays the same + secret_count = k8s.count_secrets_with_label(cluster_label) + # enable password rotation for owner of foo database pg_patch_rotation_single_users = { "spec": { @@ -1810,6 +1815,7 @@ class EndToEndTestCase(unittest.TestCase): enable_password_rotation = { "data": { "enable_password_rotation": "true", + "inherited_annotations": "environment", "password_rotation_interval": "30", "password_rotation_user_retention": "30", # should be set to 60 }, @@ -1856,13 +1862,29 @@ class EndToEndTestCase(unittest.TestCase): self.eventuallyEqual(lambda: len(self.query_database_with_user(leader.metadata.name, "postgres", "SELECT 1", "foo_user")), 1, "Could not connect to the database with rotation user {}".format(rotation_user), 10, 5) + # add annotation which triggers syncSecrets call + pg_annotation_patch = { + "metadata": { + "annotations": { + "environment": "test", + } + } + } + k8s.api.custom_objects_api.patch_namespaced_custom_object( + "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_annotation_patch) + self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") + time.sleep(10) + self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), secret_count, "Unexpected number of secrets") + # check if rotation has been ignored for user from test_cross_namespace_secrets test db_user_secret = k8s.get_secret(username="test.db_user", namespace="test") secret_username = str(base64.b64decode(db_user_secret.data["username"]), 'utf-8') - self.assertEqual("test.db_user", secret_username, "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username)) + # check if annotation for secret has been updated + 
self.assertTrue("environment" in db_user_secret.metadata.annotations, "Added annotation was not propagated to secret") + # disable password rotation for all other users (foo_user) # and pick smaller intervals to see if the third fake rotation user is dropped enable_password_rotation = { @@ -2100,7 +2122,7 @@ class EndToEndTestCase(unittest.TestCase): patch_sset_propagate_annotations = { "data": { "downscaler_annotations": "deployment-time,downscaler/*", - "inherited_annotations": "owned-by", + "inherited_annotations": "environment,owned-by", } } k8s.update_config(patch_sset_propagate_annotations) @@ -2547,7 +2569,10 @@ class EndToEndTestCase(unittest.TestCase): self.assertTrue(self.has_postgresql_owner_reference(config_ep.metadata.owner_references, inverse), "config endpoint owner reference check failed") pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-pdb".format(cluster_name), cluster_namespace) - self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption owner reference check failed") + self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "primary pod disruption budget owner reference check failed") + + pdb = k8s.api.policy_v1.read_namespaced_pod_disruption_budget("postgres-{}-critical-op-pdb".format(cluster_name), cluster_namespace) + self.assertTrue(self.has_postgresql_owner_reference(pdb.metadata.owner_references, inverse), "pod disruption budget for critical operations owner reference check failed") pg_secret = k8s.api.core_v1.read_namespaced_secret("postgres.{}.credentials.postgresql.acid.zalan.do".format(cluster_name), cluster_namespace) self.assertTrue(self.has_postgresql_owner_reference(pg_secret.metadata.owner_references, inverse), "postgres secret owner reference check failed") diff --git a/go.mod b/go.mod index 9c0125229..df7c94b5b 100644 --- a/go.mod +++ b/go.mod @@ -1,75 +1,75 @@ module github.com/zalando/postgres-operator -go 1.23.4 +go 1.25.3 require ( - github.com/aws/aws-sdk-go v1.53.8 + github.com/Masterminds/semver v1.5.0 + github.com/aws/aws-sdk-go v1.55.8 github.com/golang/mock v1.6.0 github.com/lib/pq v1.10.9 github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d github.com/pkg/errors v0.9.1 github.com/r3labs/diff v1.1.0 github.com/sirupsen/logrus v1.9.3 - github.com/stretchr/testify v1.9.0 - golang.org/x/crypto v0.31.0 - golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 + github.com/stretchr/testify v1.11.1 + golang.org/x/crypto v0.45.0 gopkg.in/yaml.v2 v2.4.0 - k8s.io/api v0.30.4 + k8s.io/api v0.32.9 k8s.io/apiextensions-apiserver v0.25.9 - k8s.io/apimachinery v0.30.4 - k8s.io/client-go v0.30.4 + k8s.io/apimachinery v0.32.9 + k8s.io/client-go v0.32.9 k8s.io/code-generator v0.25.9 ) require ( - github.com/Masterminds/semver v1.5.0 - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/evanphx/json-patch v4.12.0+incompatible // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - 
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.3.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/jmespath/go-jmespath v0.4.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.17.0 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.10.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.3.0 // indirect - golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.33.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + golang.org/x/mod v0.29.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sync v0.18.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.9.0 // indirect + golang.org/x/tools v0.38.0 // indirect + golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/gengo v0.0.0-20220902162205-c0856e24416d // indirect - k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/go.sum b/go.sum index 0e55f2dd7..581054d7c 100644 --- a/go.sum +++ b/go.sum @@ -2,55 +2,52 @@ 
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3Q github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/aws/aws-sdk-go v1.53.8 h1:eoqGb1WOHIrCFKo1d51cMcnt1ralfLFaEqRkC5Zzv8k= -github.com/aws/aws-sdk-go v1.53.8/go.mod h1:LF8svs817+Nz+DmiMQKTO3ubZ/6IaTpq3TjupRn3Eqk= +github.com/aws/aws-sdk-go v1.55.8 h1:JRmEUbU52aJQZ2AjX4q4Wu7t4uZjOu71uyNmaWlUkJQ= +github.com/aws/aws-sdk-go v1.55.8/go.mod h1:ZkViS9AqA6otK+JBBNH2++sx1sgxrPKcSzPPvQkUtXk= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/evanphx/json-patch v4.12.0+incompatible h1:4onqiflcdA9EOZ4RxV643DvftH5pOlLGNtQ5lPWQu84= -github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 
h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= -github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket 
v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8= @@ -73,8 +70,8 @@ github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -86,18 +83,19 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= -github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= +github.com/onsi/ginkgo/v2 v2.21.0 h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/r3labs/diff v1.1.0 h1:V53xhrbTHrWFWq3gI4b94AjgEJOerO1+1l0xyHOBi8M= github.com/r3labs/diff v1.1.0/go.mod h1:7WjXasNzi0vJetRcB/RqNl5dlIsmXcTTLmF5IoH6Xig= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 
h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= @@ -111,39 +109,40 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.29.0 h1:HV8lRxZC4l2cr3Zq1LvtOsi/ThTgWnUk/y64QSs8GwA= +golang.org/x/mod v0.29.0/go.mod h1:NyhrlYXJ2H4eJiRy/WDBO6HMqZQ6q9nk4JzS3NuCK+w= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= 
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= -golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.18.0 h1:kr88TuHDroi+UVf+0hZnirlk8o8T+4MrK6mr60WkH/I= +golang.org/x/sync v0.18.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -151,38 +150,41 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= 
-golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200505023115-26f46d2f7ef8/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= +golang.org/x/tools/go/expect v0.1.0-deprecated h1:jY2C5HGYR5lqex3gEniOQL0r7Dq5+VGVgY1nudX5lXY= +golang.org/x/tools/go/expect v0.1.0-deprecated/go.mod h1:eihoPOH+FgIqa3FpoTwguz/bVUSGBlGQU67vpBeOrBY= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated h1:1h2MnaIAIXISqTFKdENegdpAgUXz6NrPEsbIeWaBRvM= +golang.org/x/tools/go/packages/packagestest v0.1.1-deprecated/go.mod h1:RVAQXBGNv1ib0J382/DPCRS/BPnsGebyM1Gj5VSDpG8= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= 
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -192,31 +194,34 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs= -k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0= +k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI= +k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y= k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= -k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY= -k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY= -k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc= +k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs= +k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c= +k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI= k8s.io/code-generator v0.25.9 h1:lgyAV9AIRYNxZxgLRXqsCAtqJLHvakot41CjEqD5W0w= k8s.io/code-generator v0.25.9/go.mod h1:DHfpdhSUrwqF0f4oLqCtF8gYbqlndNetjBEz45nWzJI= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d h1:U9tB195lKdzwqicbJvyJeOXV7Klv+wNAWENRnXEGi08= k8s.io/gengo v0.0.0-20220902162205-c0856e24416d/go.mod h1:FiNAH4ZV3gBg2Kwh89tzAEV2be7d5xI0vBa/VySYy3E= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70 h1:NGrVE502P0s0/1hudf8zjgwki1X/TByhmAoILTarmzo= -k8s.io/gengo/v2 v2.0.0-20240228010128-51d4e06bde70/go.mod h1:VH3AT8AaQOqiGjMF9p0/IM1Dj+82ZwjfxUP1IxaHE+8= +k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 h1:cErOOTkQ3JW19o4lo91fFurouhP8NcoBvb7CkvhZZpk= +k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7/go.mod h1:EJykeLsmFC60UQbYJezXkEsG2FLrt0GPNkU5iK5GWxU= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi 
v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/kubectl-pg/cmd/connect.go b/kubectl-pg/cmd/connect.go index 2c6d87835..a7643ca05 100644 --- a/kubectl-pg/cmd/connect.go +++ b/kubectl-pg/cmd/connect.go @@ -23,6 +23,7 @@ THE SOFTWARE. package cmd import ( + "context" "log" "os" user "os/user" @@ -121,7 +122,7 @@ func connect(clusterName string, master bool, replica string, psql bool, user st log.Fatal(err) } - err = exec.Stream(remotecommand.StreamOptions{ + err = exec.StreamWithContext(context.TODO(), remotecommand.StreamOptions{ Stdin: os.Stdin, Stdout: os.Stdout, Stderr: os.Stderr, diff --git a/kubectl-pg/cmd/version.go b/kubectl-pg/cmd/version.go index e9a1e8056..23cc55422 100644 --- a/kubectl-pg/cmd/version.go +++ b/kubectl-pg/cmd/version.go @@ -65,7 +65,7 @@ func version(namespace string) { operatorDeployment := getPostgresOperator(client) if operatorDeployment.Name == "" { - log.Fatal("make sure zalando's postgres operator is running") + log.Fatalf("make sure zalando's postgres operator is running in namespace %s", namespace) } operatorImage := operatorDeployment.Spec.Template.Spec.Containers[0].Image imageDetails := strings.Split(operatorImage, ":") diff --git a/kubectl-pg/go.mod b/kubectl-pg/go.mod index 9b2e1bbc5..7f80cbfd7 100644 --- a/kubectl-pg/go.mod +++ b/kubectl-pg/go.mod @@ -1,74 +1,72 @@ module github.com/zalando/postgres-operator/kubectl-pg -go 1.23.4 +go 1.25.3 require ( - github.com/spf13/cobra v1.8.1 - github.com/spf13/viper v1.19.0 - github.com/zalando/postgres-operator v1.13.0 - k8s.io/api v0.30.4 + github.com/spf13/cobra v1.10.1 + github.com/spf13/viper v1.21.0 + github.com/zalando/postgres-operator v1.15.0 + k8s.io/api v0.32.9 k8s.io/apiextensions-apiserver v0.25.9 - k8s.io/apimachinery v0.30.4 - k8s.io/client-go v0.30.4 + k8s.io/apimachinery v0.32.9 + k8s.io/client-go v0.32.9 ) require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect - github.com/fsnotify/fsnotify v1.7.0 // indirect - github.com/go-logr/logr v1.4.1 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + 
github.com/fsnotify/fsnotify v1.9.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect + github.com/go-logr/logr v1.4.2 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.23.0 // indirect + github.com/go-viper/mapstructure/v2 v2.4.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/gnostic-models v0.6.8 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect - github.com/google/uuid v1.4.0 // indirect - github.com/gorilla/websocket v1.5.0 // indirect - github.com/hashicorp/hcl v1.0.0 // indirect - github.com/imdario/mergo v0.3.6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/kr/text v0.2.0 // indirect - github.com/magiconair/properties v1.8.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect - github.com/mitchellh/mapstructure v1.5.0 // indirect - github.com/moby/spdystream v0.2.0 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/motomux/pretty v0.0.0-20161209205251-b2aad2c9a95d // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect - github.com/pelletier/go-toml/v2 v2.2.2 // indirect - github.com/sagikazarmark/locafero v0.4.0 // indirect - github.com/sagikazarmark/slog-shim v0.1.0 // indirect + github.com/pelletier/go-toml/v2 v2.2.4 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/sagikazarmark/locafero v0.11.0 // indirect github.com/sirupsen/logrus v1.9.3 // indirect - github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.11.0 // indirect - github.com/spf13/cast v1.6.0 // indirect - github.com/spf13/pflag v1.0.5 // indirect + github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 // indirect + github.com/spf13/afero v1.15.0 // indirect + github.com/spf13/cast v1.10.0 // indirect + github.com/spf13/pflag v1.0.10 // indirect github.com/subosito/gotenv v1.6.0 // indirect - go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.31.0 // indirect - golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect - golang.org/x/net v0.25.0 // indirect - golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sys v0.28.0 // indirect - golang.org/x/term v0.27.0 // indirect - golang.org/x/text v0.21.0 // indirect - golang.org/x/time v0.5.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/protobuf v1.33.0 // indirect + github.com/x448/float16 v0.8.4 // indirect + go.yaml.in/yaml/v3 v3.0.4 // indirect + golang.org/x/crypto v0.45.0 // indirect + golang.org/x/net v0.47.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect + golang.org/x/sys v0.38.0 // indirect + golang.org/x/term v0.37.0 // indirect + golang.org/x/text v0.31.0 // indirect + golang.org/x/time v0.9.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // 
indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/ini.v1 v1.67.0 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.120.1 // indirect - k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 // indirect - k8s.io/utils v0.0.0-20230726121419-3b25d923346b // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/kubectl-pg/go.sum b/kubectl-pg/go.sum index 2237a9e03..488d24edc 100644 --- a/kubectl-pg/go.sum +++ b/kubectl-pg/go.sum @@ -1,6 +1,6 @@ github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -10,44 +10,42 @@ github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxER github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= -github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= -github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= -github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= -github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= +github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= +github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= +github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 
h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= -github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= +github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= +github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= +github.com/go-viper/mapstructure/v2 v2.4.0 h1:EBsztssimR/CONLSZZ04E8qAkxNYq4Qp9LvH92wZUgs= +github.com/go-viper/mapstructure/v2 v2.4.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= -github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/uuid v1.4.0 h1:MtMxsa51/r9yyhkyLsVeVt0B+BGQZzpQiTQ4eHZ8bc4= -github.com/google/uuid v1.4.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= -github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod 
h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db h1:097atOisP2aRj7vFgYQBbFN4U4JNXUNYpxael3UzMyo= +github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= @@ -63,14 +61,10 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY= -github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= -github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= -github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= -github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8= -github.com/moby/spdystream v0.2.0/go.mod h1:f7i0iNDQJ059oMTcWxx8MA/zKFIuD/lY+0GqbN2Wy8c= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -82,151 +76,131 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= -github.com/onsi/ginkgo/v2 v2.15.0 h1:79HwNRBAZHOEwrczrgSOPy+eFTTlIGELKy5as+ClttY= -github.com/onsi/ginkgo/v2 v2.15.0/go.mod h1:HlxMHtYF57y6Dpf+mc5529KKmSq9h2FpCF+/ZkwUxKM= -github.com/onsi/gomega v1.31.0 h1:54UJxxj6cPInHS3a35wm6BK/F9nHYueZ1NVujHDrnXE= -github.com/onsi/gomega v1.31.0/go.mod h1:DW9aCi7U6Yi40wNVAvT6kzFnEVEI5n3DloYBiKiT6zk= -github.com/pelletier/go-toml/v2 v2.2.2 h1:aYUidT7k73Pcl9nb2gScu7NSrKCSHIDE89b3+6Wq+LM= -github.com/pelletier/go-toml/v2 v2.2.2/go.mod h1:1t835xjRzz80PqgE6HHgN2JOsmgYu/h4qDAS4n929Rs= +github.com/onsi/ginkgo/v2 v2.21.0 
h1:7rg/4f3rB88pb5obDgNZrNHrQ4e6WpjonchcpuBRnZM= +github.com/onsi/ginkgo/v2 v2.21.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo= +github.com/onsi/gomega v1.35.1 h1:Cwbd75ZBPxFSuZ6T+rN/WCb/gOc6YgFBXLlZLhC7Ds4= +github.com/onsi/gomega v1.35.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog= +github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4= +github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= -github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8= +github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= -github.com/sagikazarmark/locafero v0.4.0 h1:HApY1R9zGo4DBgr7dqsTH/JJxLTTsOt7u6keLGt6kNQ= -github.com/sagikazarmark/locafero v0.4.0/go.mod h1:Pe1W6UlPYUk/+wc/6KFhbORCfqzgYEpgQ3O5fPuL3H4= -github.com/sagikazarmark/slog-shim v0.1.0 h1:diDBnUNK9N/354PgrxMywXnAwEr1QZcOr6gto+ugjYE= -github.com/sagikazarmark/slog-shim v0.1.0/go.mod h1:SrcSrq8aKtyuqEI1uvTDTK1arOWRIczQRv+GVI1AkeQ= +github.com/sagikazarmark/locafero v0.11.0 h1:1iurJgmM9G3PA/I+wWYIOw/5SyBtxapeHDcg+AAIFXc= +github.com/sagikazarmark/locafero v0.11.0/go.mod h1:nVIGvgyzw595SUSUE6tvCp3YYTeHs15MvlmU87WwIik= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= -github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= -github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.11.0 h1:WJQKhtpdm3v2IzqG8VMqrr6Rf3UYpEF239Jy9wNepM8= -github.com/spf13/afero v1.11.0/go.mod h1:GH9Y3pIexgf1MTIWtNGyogA5MwRIDXGUr+hbWNoBjkY= -github.com/spf13/cast v1.6.0 h1:GEiTHELF+vaR5dhz3VqZfFSzZjYbgeKDpBxQVS4GYJ0= -github.com/spf13/cast v1.6.0/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= -github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= -github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= -github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= -github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= -github.com/spf13/viper v1.19.0 h1:RWq5SEjt8o25SROyN3z2OrDB9l7RPd3lwTWU8EcEdcI= -github.com/spf13/viper v1.19.0/go.mod h1:GQUN9bilAbhU/jgc1bKs99f/suXKeUMct8Adx5+Ntkg= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8 h1:+jumHNA0Wrelhe64i8F6HNlS8pkoyMv5sreGx2Ry5Rw= +github.com/sourcegraph/conc v0.3.1-0.20240121214520-5f936abd7ae8/go.mod h1:3n1Cwaq1E1/1lhQhtRK2ts/ZwZEhjcQeJQ1RuC6Q/8U= +github.com/spf13/afero v1.15.0 h1:b/YBCLWAJdFWJTN9cLhiXXcD7mzKn9Dm86dNnfyQw1I= +github.com/spf13/afero v1.15.0/go.mod 
h1:NC2ByUVxtQs4b3sIUphxK0NioZnmxgyCrfzeuq8lxMg= +github.com/spf13/cast v1.10.0 h1:h2x0u2shc1QuLHfxi+cTJvs30+ZAHOGRic8uyGTDWxY= +github.com/spf13/cast v1.10.0/go.mod h1:jNfB8QC9IA6ZuY2ZjDp0KtFO2LZZlg4S/7bzP6qqeHo= +github.com/spf13/cobra v1.10.1 h1:lJeBwCfmrnXthfAupyUTzJ/J4Nc1RsHC/mSRU2dll/s= +github.com/spf13/cobra v1.10.1/go.mod h1:7SmJGaTHFVBY0jW4NXGluQoLvhqFQM+6XSKD+P4XaB0= +github.com/spf13/pflag v1.0.9/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/pflag v1.0.10 h1:4EBh2KAYBwaONj6b2Ye1GiHfwjqyROoF4RwYO+vPwFk= +github.com/spf13/pflag v1.0.10/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.21.0 h1:x5S+0EU27Lbphp4UKm1C+1oQO+rKx36vfCoaVebLFSU= +github.com/spf13/viper v1.21.0/go.mod h1:P0lhsswPGWD/1lZJ9ny3fYnVqxiegrlNrEmgLjbTCAY= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zalando/postgres-operator v1.13.0 h1:T9Mb+ZRQyTxXbagIK66GLVGCwM3661aX2lOkNpax4s8= -github.com/zalando/postgres-operator v1.13.0/go.mod h1:WiMEKzUny2lJHYle+7+D/5BhlvPn8prl76rEDYLsQAg= -go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= -go.uber.org/multierr v1.11.0/go.mod h1:20+QtiLqy0Nd6FdQB9TLXag12DsQkrbs3htMFfDN80Y= +github.com/zalando/postgres-operator v1.15.0 h1:is/7cOrpuV7OwMiN7TG7GgiYHKvaWx8Ptw3hJruFO1I= +github.com/zalando/postgres-operator v1.15.0/go.mod h1:1cSOA5dG2dEqdG0uami1RHTGYX92bgAKYASfAhuMtHE= +go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= +go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod 
h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= -golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= -golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= +golang.org/x/crypto v0.45.0 h1:jMBrvKuj23MTlT0bQEOBcAE0mjg8mK9RXFhRH6nyF3Q= +golang.org/x/crypto v0.45.0/go.mod h1:XTGrrkGJve7CYK7J8PEww4aY7gM3qMCElcJQ8n8JdX4= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= -golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI= -golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8= +golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY= +golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys 
v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= -golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= -golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= +golang.org/x/sys v0.38.0 h1:3yZWxaJjBmCWXqhN1qh02AkOnCQ1poK6oF+a7xWL6Gc= +golang.org/x/sys v0.38.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.37.0 h1:8EGAD0qCmHYZg6J17DvsMy9/wJ7/D/4pV/wfnld5lTU= +golang.org/x/term v0.37.0/go.mod h1:5pB4lxRNYYVZuTLmy8oR2BH8dflOR+IbTYFD8fi3254= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= -golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= -golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= -golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/text v0.31.0 h1:aC8ghyu4JhP8VojJ2lEHBnochRno1sgL6nEi9WGFGMM= +golang.org/x/text v0.31.0/go.mod h1:tKRAlv61yKIjGGHX/4tP1LTbc13YSec1pxVEWXzfoeM= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= -golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= +golang.org/x/tools v0.38.0 h1:Hx2Xv8hISq8Lm16jvBZ2VQf+RLmbd7wVUsALibYI/IQ= +golang.org/x/tools v0.38.0/go.mod h1:yEsQ/d/YK8cjh0L6rZlY8tgtlKiBNTL14pGDJPJpYQs= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod 
h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= -gopkg.in/ini.v1 v1.67.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.30.4 h1:XASIELmW8w8q0i1Y4124LqPoWMycLjyQti/fdYHYjCs= -k8s.io/api v0.30.4/go.mod h1:ZqniWRKu7WIeLijbbzetF4U9qZ03cg5IRwl8YVs8mX0= +k8s.io/api v0.32.9 h1:q/59kk8lnecgG0grJqzrmXC1Jcl2hPWp9ltz0FQuoLI= +k8s.io/api v0.32.9/go.mod h1:jIfT3rwW4EU1IXZm9qjzSk/2j91k4CJL5vUULrxqp3Y= k8s.io/apiextensions-apiserver v0.25.9 h1:Pycd6lm2auABp9wKQHCFSEPG+NPdFSTJXPST6NJFzB8= k8s.io/apiextensions-apiserver v0.25.9/go.mod h1:ijGxmSG1GLOEaWhTuaEr0M7KUeia3mWCZa6FFQqpt1M= -k8s.io/apimachinery v0.30.4 h1:5QHQI2tInzr8LsT4kU/2+fSeibH1eIHswNx480cqIoY= -k8s.io/apimachinery v0.30.4/go.mod h1:iexa2somDaxdnj7bha06bhb43Zpa6eWH8N8dbqVjTUc= -k8s.io/client-go v0.30.4 h1:eculUe+HPQoPbixfwmaSZGsKcOf7D288tH6hDAdd+wY= -k8s.io/client-go v0.30.4/go.mod h1:IBS0R/Mt0LHkNHF4E6n+SUDPG7+m2po6RZU7YHeOpzc= -k8s.io/klog/v2 v2.120.1 h1:QXU6cPEOIslTGvZaXvFWiP9VKyeet3sawzTOvdXb4Vw= -k8s.io/klog/v2 v2.120.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340 h1:BZqlfIlq5YbRMFko6/PM7FjZpUb45WallggurYhKGag= -k8s.io/kube-openapi v0.0.0-20240228011516-70dd3763d340/go.mod h1:yD4MZYeKMBwQKVht279WycxKyM84kkAx2DPrTXaeb98= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b h1:sgn3ZU783SCgtaSJjpcVVlRqd6GSnlTLKgpAAttJvpI= -k8s.io/utils v0.0.0-20230726121419-3b25d923346b/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1 h1:150L+0vs/8DA78h1u02ooW1/fFq/Lwr+sGiqlzvrtq4= -sigs.k8s.io/structured-merge-diff/v4 v4.4.1/go.mod h1:N8hJocpFajUSSeSJ9bOZ77VzejKZaXsTtZo4/u7Io08= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod 
h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/apimachinery v0.32.9 h1:fXk8ktfsxrdThaEOAQFgkhCK7iyoyvS8nbYJ83o/SSs= +k8s.io/apimachinery v0.32.9/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.9 h1:ZMyIQ1TEpTDAQni3L2gH1NZzyOA/gHfNcAazzCxMJ0c= +k8s.io/client-go v0.32.9/go.mod h1:2OT8aFSYvUjKGadaeT+AVbhkXQSpMAkiSb88Kz2WggI= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/logical-backup/dump.sh b/logical-backup/dump.sh index 25641c3b5..a250670a6 100755 --- a/logical-backup/dump.sh +++ b/logical-backup/dump.sh @@ -122,7 +122,21 @@ function aws_upload { function gcs_upload { PATH_TO_BACKUP=gs://$LOGICAL_BACKUP_S3_BUCKET"/"$LOGICAL_BACKUP_S3_BUCKET_PREFIX"/"$SCOPE$LOGICAL_BACKUP_S3_BUCKET_SCOPE_SUFFIX"/logical_backups/"$(date +%s).sql.gz - gsutil -o Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS cp - "$PATH_TO_BACKUP" + #Set local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS to nothing or the + #value of the LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS env var.
Needed + #because `set -o nounset` is globally set + local LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS=${LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS:-} + + GSUTIL_OPTIONS=("-o" "Credentials:gs_service_key_file=$LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS") + + #If GOOGLE_APPLICATION_CREDENTIALS is not set try to get + #creds from metadata + if [[ -z $LOGICAL_BACKUP_GOOGLE_APPLICATION_CREDENTIALS ]] + then + GSUTIL_OPTIONS[1]="GoogleCompute:service_account=default" + fi + + gsutil ${GSUTIL_OPTIONS[@]} cp - "$PATH_TO_BACKUP" } function upload { diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 44d317123..7677dca62 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -10,7 +10,7 @@ metadata: # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured spec: - dockerImage: ghcr.io/zalando/spilo-17:4.0-p2 + dockerImage: ghcr.io/zalando/spilo-17:4.0-p3 teamId: "acid" numberOfInstances: 2 users: # Application/Robot users diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 9473ef5ec..fcf08c3f8 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -34,7 +34,7 @@ data: default_memory_request: 100Mi # delete_annotation_date_key: delete-date # delete_annotation_name_key: delete-clustername - docker_image: ghcr.io/zalando/spilo-17:4.0-p2 + docker_image: ghcr.io/zalando/spilo-17:4.0-p3 # downscaler_annotations: "deployment-time,downscaler/*" enable_admin_role_for_users: "true" enable_crd_registration: "true" @@ -86,7 +86,7 @@ data: # logical_backup_cpu_limit: "" # logical_backup_cpu_request: "" logical_backup_cronjob_environment_secret: "" - logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1" # logical_backup_google_application_credentials: "" logical_backup_job_prefix: "logical-backup-" # logical_backup_memory_limit: "" diff --git a/manifests/operator-service-account-rbac.yaml b/manifests/operator-service-account-rbac.yaml index bf27f99f1..2cc1edcd1 100644 --- a/manifests/operator-service-account-rbac.yaml +++ b/manifests/operator-service-account-rbac.yaml @@ -59,13 +59,20 @@ rules: - get - patch - update -# to read configuration from ConfigMaps +# to read configuration from ConfigMaps and help Patroni manage the cluster if endpoints are not used - apiGroups: - "" resources: - configmaps verbs: + - create + - delete + - deletecollection - get + - list + - patch + - update + - watch # to send events to the CRs - apiGroups: - "" @@ -78,7 +85,7 @@ rules: - patch - update - watch -# to manage endpoints which are also used by Patroni +# to manage endpoints which are also used by Patroni (if it is using config maps) - apiGroups: - "" resources: @@ -249,7 +256,21 @@ kind: ClusterRole metadata: name: postgres-pod rules: -# Patroni needs to watch and manage endpoints +# Patroni needs to watch and manage config maps (or endpoints) +- apiGroups: + - "" + resources: + - configmaps + verbs: + - create + - delete + - deletecollection + - get + - list + - patch + - update + - watch +# Patroni needs to watch and manage endpoints (or config maps) - apiGroups: - "" resources: diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 
ded2477d7..466f5190e 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -66,7 +66,7 @@ spec: type: string docker_image: type: string - default: "ghcr.io/zalando/spilo-17:4.0-p2" + default: "ghcr.io/zalando/spilo-17:4.0-p3" enable_crd_registration: type: boolean default: true @@ -508,7 +508,7 @@ spec: pattern: '^(\d+m|\d+(\.\d{1,3})?)$' logical_backup_docker_image: type: string - default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" + default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1" logical_backup_google_application_credentials: type: string logical_backup_job_prefix: diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index e3f77657e..253649078 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: ghcr.io/zalando/postgres-operator:v1.14.0 + image: ghcr.io/zalando/postgres-operator:v1.15.1 imagePullPolicy: IfNotPresent resources: requests: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index 570ebd338..5251f9a06 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -3,7 +3,7 @@ kind: OperatorConfiguration metadata: name: postgresql-operator-default-configuration configuration: - docker_image: ghcr.io/zalando/spilo-17:4.0-p2 + docker_image: ghcr.io/zalando/spilo-17:4.0-p3 # enable_crd_registration: true # crd_categories: # - all @@ -168,7 +168,7 @@ configuration: # logical_backup_cpu_request: "" # logical_backup_memory_limit: "" # logical_backup_memory_request: "" - logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1" # logical_backup_google_application_credentials: "" logical_backup_job_prefix: "logical-backup-" logical_backup_provider: "s3" diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 39d751cef..7a1b21a4d 100644 --- a/manifests/postgresql.crd.yaml +++ b/manifests/postgresql.crd.yaml @@ -276,7 +276,6 @@ spec: items: type: string weight: - format: int32 type: integer requiredDuringSchedulingIgnoredDuringExecution: type: object diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 3f6bf25d9..b89cb1448 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -436,8 +436,7 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, "weight": { - Type: "integer", - Format: "int32", + Type: "integer", }, }, }, @@ -768,9 +767,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ }, }, OneOf: []apiextv1.JSONSchemaProps{ - apiextv1.JSONSchemaProps{Required: []string{"s3_wal_path"}}, - apiextv1.JSONSchemaProps{Required: []string{"gs_wal_path"}}, - apiextv1.JSONSchemaProps{Required: []string{"standby_host"}}, + {Required: []string{"s3_wal_path"}}, + {Required: []string{"gs_wal_path"}}, + {Required: []string{"standby_host"}}, }, }, "streams": { diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index 5e4913ffe..9f3fe9bde 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -787,8 +787,6 @@ func TestPostgresListMeta(t *testing.T) { if a := 
tt.out.GetListMeta(); reflect.DeepEqual(a, tt.out.ListMeta) { t.Errorf("GetObjectMeta expected: %v, got: %v", tt.out.ListMeta, a) } - - return }) } } diff --git a/pkg/cluster/cluster.go b/pkg/cluster/cluster.go index 1a8d6f762..b6a4e24a8 100644 --- a/pkg/cluster/cluster.go +++ b/pkg/cluster/cluster.go @@ -59,16 +59,17 @@ type Config struct { } type kubeResources struct { - Services map[PostgresRole]*v1.Service - Endpoints map[PostgresRole]*v1.Endpoints - PatroniEndpoints map[string]*v1.Endpoints - PatroniConfigMaps map[string]*v1.ConfigMap - Secrets map[types.UID]*v1.Secret - Statefulset *appsv1.StatefulSet - VolumeClaims map[types.UID]*v1.PersistentVolumeClaim - PodDisruptionBudget *policyv1.PodDisruptionBudget - LogicalBackupJob *batchv1.CronJob - Streams map[string]*zalandov1.FabricEventStream + Services map[PostgresRole]*v1.Service + Endpoints map[PostgresRole]*v1.Endpoints + PatroniEndpoints map[string]*v1.Endpoints + PatroniConfigMaps map[string]*v1.ConfigMap + Secrets map[types.UID]*v1.Secret + Statefulset *appsv1.StatefulSet + VolumeClaims map[types.UID]*v1.PersistentVolumeClaim + PrimaryPodDisruptionBudget *policyv1.PodDisruptionBudget + CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget + LogicalBackupJob *batchv1.CronJob + Streams map[string]*zalandov1.FabricEventStream //Pods are treated separately } @@ -105,10 +106,17 @@ type Cluster struct { } type compareStatefulsetResult struct { - match bool - replace bool - rollingUpdate bool - reasons []string + match bool + replace bool + rollingUpdate bool + reasons []string + deletedPodAnnotations []string +} + +type compareLogicalBackupJobResult struct { + match bool + reasons []string + deletedPodAnnotations []string } // New creates a new cluster. This function should be called from a controller. 
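[Editor's note] The two result structs above replace plain (bool, string) return values so a comparison can report every divergence at once and carry side data, namely the pod-annotation keys that were removed from the desired spec and must later be deleted from the live objects. A minimal, self-contained sketch of the pattern, with illustrative names only (not code from this patch):

package main

import "fmt"

// compareResult mirrors the shape of compareStatefulsetResult and
// compareLogicalBackupJobResult: an overall verdict plus all reasons.
type compareResult struct {
	match   bool
	reasons []string
}

// compareMaps records every mismatching key instead of returning on the first.
func compareMaps(cur, desired map[string]string) compareResult {
	res := compareResult{match: true}
	for key, want := range desired {
		if got, ok := cur[key]; !ok || got != want {
			res.match = false
			res.reasons = append(res.reasons, fmt.Sprintf("%s: %q != %q", key, got, want))
		}
	}
	return res
}

func main() {
	cur := map[string]string{"schedule": "30 00 * * *", "image": "logical-backup:v1.14.0"}
	desired := map[string]string{"schedule": "30 01 * * *", "image": "logical-backup:v1.15.1"}
	fmt.Println(compareMaps(cur, desired)) // both mismatches are reported (order may vary)
}

Collecting all reasons also matches how the tests below now assert on a slice of reason prefixes rather than a single string.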
@@ -336,14 +344,10 @@ func (c *Cluster) Create() (err error) { c.logger.Infof("secrets have been successfully created") c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Secrets", "The secrets have been successfully created") - if c.PodDisruptionBudget != nil { - return fmt.Errorf("pod disruption budget already exists in the cluster") + if err = c.createPodDisruptionBudgets(); err != nil { + return fmt.Errorf("could not create pod disruption budgets: %v", err) } - pdb, err := c.createPodDisruptionBudget() - if err != nil { - return fmt.Errorf("could not create pod disruption budget: %v", err) - } - c.logger.Infof("pod disruption budget %q has been successfully created", util.NameFromMeta(pdb.ObjectMeta)) + c.logger.Info("pod disruption budgets have been successfully created") if c.Statefulset != nil { return fmt.Errorf("statefulset already exists in the cluster") @@ -431,6 +435,7 @@ func (c *Cluster) Create() (err error) { } func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compareStatefulsetResult { + deletedPodAnnotations := []string{} reasons := make([]string, 0) var match, needsRollUpdate, needsReplace bool @@ -445,7 +450,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa needsReplace = true reasons = append(reasons, "new statefulset's ownerReferences do not match") } - if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations); changed { + if changed, reason := c.compareAnnotations(c.Statefulset.Annotations, statefulSet.Annotations, nil); changed { match = false needsReplace = true reasons = append(reasons, "new statefulset's annotations do not match: "+reason) @@ -519,7 +524,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa } } - if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations); changed { + if changed, reason := c.compareAnnotations(c.Statefulset.Spec.Template.Annotations, statefulSet.Spec.Template.Annotations, &deletedPodAnnotations); changed { match = false needsReplace = true reasons = append(reasons, "new statefulset's pod template metadata annotations does not match "+reason) @@ -541,7 +546,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa reasons = append(reasons, fmt.Sprintf("new statefulset's name for volume %d does not match the current one", i)) continue } - if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations); changed { + if changed, reason := c.compareAnnotations(c.Statefulset.Spec.VolumeClaimTemplates[i].Annotations, statefulSet.Spec.VolumeClaimTemplates[i].Annotations, nil); changed { needsReplace = true reasons = append(reasons, fmt.Sprintf("new statefulset's annotations for volume %q do not match the current ones: %s", name, reason)) } @@ -579,7 +584,7 @@ func (c *Cluster) compareStatefulSetWith(statefulSet *appsv1.StatefulSet) *compa match = false } - return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace} + return &compareStatefulsetResult{match: match, reasons: reasons, rollingUpdate: needsRollUpdate, replace: needsReplace, deletedPodAnnotations: deletedPodAnnotations} } type containerCondition func(a, b v1.Container) bool @@ -781,7 +786,7 @@ func volumeMountExists(mount v1.VolumeMount, mounts []v1.VolumeMount) bool { return false } -func (c *Cluster) 
compareAnnotations(old, new map[string]string) (bool, string) { +func (c *Cluster) compareAnnotations(old, new map[string]string, removedList *[]string) (bool, string) { reason := "" ignoredAnnotations := make(map[string]bool) for _, ignore := range c.OpConfig.IgnoredAnnotations { @@ -794,6 +799,9 @@ func (c *Cluster) compareAnnotations(old, new map[string]string) (bool, string) } if _, ok := new[key]; !ok { reason += fmt.Sprintf(" Removed %q.", key) + if removedList != nil { + *removedList = append(*removedList, key) + } } } @@ -833,44 +841,57 @@ func (c *Cluster) compareServices(old, new *v1.Service) (bool, string) { return false, "new service's owner references do not match the current ones" } + if !reflect.DeepEqual(old.Spec.Selector, new.Spec.Selector) { + return false, "new service's selector does not match the current one" + } + + if old.Spec.ExternalTrafficPolicy != new.Spec.ExternalTrafficPolicy { + return false, "new service's ExternalTrafficPolicy does not match the current one" + } + return true, "" } -func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) (match bool, reason string) { +func (c *Cluster) compareLogicalBackupJob(cur, new *batchv1.CronJob) *compareLogicalBackupJobResult { + deletedPodAnnotations := []string{} + reasons := make([]string, 0) + match := true if cur.Spec.Schedule != new.Spec.Schedule { - return false, fmt.Sprintf("new job's schedule %q does not match the current one %q", - new.Spec.Schedule, cur.Spec.Schedule) + match = false + reasons = append(reasons, fmt.Sprintf("new job's schedule %q does not match the current one %q", new.Spec.Schedule, cur.Spec.Schedule)) } newImage := new.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image curImage := cur.Spec.JobTemplate.Spec.Template.Spec.Containers[0].Image if newImage != curImage { - return false, fmt.Sprintf("new job's image %q does not match the current one %q", - newImage, curImage) + match = false + reasons = append(reasons, fmt.Sprintf("new job's image %q does not match the current one %q", newImage, curImage)) } newPodAnnotation := new.Spec.JobTemplate.Spec.Template.Annotations curPodAnnotation := cur.Spec.JobTemplate.Spec.Template.Annotations - if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation); changed { - return false, fmt.Sprintf("new job's pod template metadata annotations does not match " + reason) + if changed, reason := c.compareAnnotations(curPodAnnotation, newPodAnnotation, &deletedPodAnnotations); changed { + match = false + reasons = append(reasons, fmt.Sprint("new job's pod template metadata annotations do not match "+reason)) } newPgVersion := getPgVersion(new) curPgVersion := getPgVersion(cur) if newPgVersion != curPgVersion { - return false, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q", - newPgVersion, curPgVersion) + match = false + reasons = append(reasons, fmt.Sprintf("new job's env PG_VERSION %q does not match the current one %q", newPgVersion, curPgVersion)) } needsReplace := false - reasons := make([]string, 0) - needsReplace, reasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, reasons) + contReasons := make([]string, 0) + needsReplace, contReasons = c.compareContainers("cronjob container", cur.Spec.JobTemplate.Spec.Template.Spec.Containers, new.Spec.JobTemplate.Spec.Template.Spec.Containers, needsReplace, contReasons) if needsReplace { - return false, fmt.Sprintf("logical backup 
container specs do not match: %v", strings.Join(reasons, `', '`)) + match = false + reasons = append(reasons, fmt.Sprintf("logical backup container specs do not match: %v", strings.Join(contReasons, `', '`))) } - return true, "" + return &compareLogicalBackupJobResult{match: match, reasons: reasons, deletedPodAnnotations: deletedPodAnnotations} } func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBudget) (bool, string) { @@ -881,7 +902,7 @@ func (c *Cluster) comparePodDisruptionBudget(cur, new *policyv1.PodDisruptionBud if !reflect.DeepEqual(new.ObjectMeta.OwnerReferences, cur.ObjectMeta.OwnerReferences) { return false, "new PDB's owner references do not match the current ones" } - if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations); changed { + if changed, reason := c.compareAnnotations(cur.Annotations, new.Annotations, nil); changed { return false, "new PDB's annotations do not match the current ones:" + reason } return true, "" @@ -957,6 +978,11 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { defer c.mu.Unlock() c.KubeClient.SetPostgresCRDStatus(c.clusterName(), acidv1.ClusterStatusUpdating) + + if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) { + // do not apply any major version related changes yet + newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion + } c.setSpec(newSpec) defer func() { @@ -1016,10 +1042,18 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // only when streams were not specified in oldSpec but in newSpec needStreamUser := len(oldSpec.Spec.Streams) == 0 && len(newSpec.Spec.Streams) > 0 - annotationsChanged, _ := c.compareAnnotations(oldSpec.Annotations, newSpec.Annotations) - initUsers := !sameUsers || !sameRotatedUsers || needPoolerUser || needStreamUser - if initUsers { + + // if inherited annotations differ secrets have to be synced on update + newAnnotations := c.annotationsSet(nil) + oldAnnotations := make(map[string]string) + for _, secret := range c.Secrets { + oldAnnotations = secret.ObjectMeta.Annotations + break + } + annotationsChanged, _ := c.compareAnnotations(oldAnnotations, newAnnotations, nil) + + if initUsers || annotationsChanged { c.logger.Debug("initialize users") if err := c.initUsers(); err != nil { c.logger.Errorf("could not init users - skipping sync of secrets and databases: %v", err) @@ -1027,8 +1061,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { updateFailed = true return } - } - if initUsers || annotationsChanged { + c.logger.Debug("syncing secrets") //TODO: mind the secrets of the deleted/new users if err := c.syncSecrets(); err != nil { @@ -1060,9 +1093,9 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { } } - // pod disruption budget - if err := c.syncPodDisruptionBudget(true); err != nil { - c.logger.Errorf("could not sync pod disruption budget: %v", err) + // pod disruption budgets + if err := c.syncPodDisruptionBudgets(true); err != nil { + c.logger.Errorf("could not sync pod disruption budgets: %v", err) updateFailed = true } @@ -1135,6 +1168,7 @@ func (c *Cluster) Update(oldSpec, newSpec *acidv1.Postgresql) error { // streams if len(newSpec.Spec.Streams) > 0 || len(oldSpec.Spec.Streams) != len(newSpec.Spec.Streams) { + c.logger.Debug("syncing streams") if err := c.syncStreams(); err != nil { c.logger.Errorf("could not sync streams: %v", err) updateFailed = true @@ -1207,10 +1241,10 @@ func (c *Cluster) Delete() error { c.logger.Info("not deleting 
secrets because disabled in configuration") } - if err := c.deletePodDisruptionBudget(); err != nil { + if err := c.deletePodDisruptionBudgets(); err != nil { anyErrors = true - c.logger.Warningf("could not delete pod disruption budget: %v", err) - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budget: %v", err) + c.logger.Warningf("could not delete pod disruption budgets: %v", err) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Delete", "could not delete pod disruption budgets: %v", err) } for _, role := range []PostgresRole{Master, Replica} { @@ -1709,16 +1743,17 @@ func (c *Cluster) GetCurrentProcess() Process { // GetStatus provides status of the cluster func (c *Cluster) GetStatus() *ClusterStatus { status := &ClusterStatus{ - Cluster: c.Name, - Namespace: c.Namespace, - Team: c.Spec.TeamID, - Status: c.Status, - Spec: c.Spec, - MasterService: c.GetServiceMaster(), - ReplicaService: c.GetServiceReplica(), - StatefulSet: c.GetStatefulSet(), - PodDisruptionBudget: c.GetPodDisruptionBudget(), - CurrentProcess: c.GetCurrentProcess(), + Cluster: c.Name, + Namespace: c.Namespace, + Team: c.Spec.TeamID, + Status: c.Status, + Spec: c.Spec, + MasterService: c.GetServiceMaster(), + ReplicaService: c.GetServiceReplica(), + StatefulSet: c.GetStatefulSet(), + PrimaryPodDisruptionBudget: c.GetPrimaryPodDisruptionBudget(), + CriticalOpPodDisruptionBudget: c.GetCriticalOpPodDisruptionBudget(), + CurrentProcess: c.GetCurrentProcess(), Error: fmt.Errorf("error: %s", c.Error), } @@ -1731,18 +1766,62 @@ func (c *Cluster) GetStatus() *ClusterStatus { return status } -// Switchover does a switchover (via Patroni) to a candidate pod -func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) error { +func (c *Cluster) GetSwitchoverSchedule() string { + now := time.Now().UTC() + return c.getSwitchoverScheduleAtTime(now) +} +func (c *Cluster) getSwitchoverScheduleAtTime(now time.Time) string { + var possibleSwitchover, schedule time.Time + + for _, window := range c.Spec.MaintenanceWindows { + // in the best case it is possible today + possibleSwitchover = time.Date(now.Year(), now.Month(), now.Day(), window.StartTime.Hour(), window.StartTime.Minute(), 0, 0, time.UTC) + if window.Everyday { + if now.After(possibleSwitchover) { + // we are already past the time for today, try tomorrow + possibleSwitchover = possibleSwitchover.AddDate(0, 0, 1) + } + } else { + if now.Weekday() != window.Weekday { + // get closest possible time for this window + possibleSwitchover = possibleSwitchover.AddDate(0, 0, int((7+window.Weekday-now.Weekday())%7)) + } else if now.After(possibleSwitchover) { + // we are already past the time for today, try next week + possibleSwitchover = possibleSwitchover.AddDate(0, 0, 7) + } + } + + if (schedule.Equal(time.Time{})) || possibleSwitchover.Before(schedule) { + schedule = possibleSwitchover + } + } + return schedule.Format("2006-01-02T15:04+00") +} + +// Switchover does a switchover (via Patroni) to a candidate pod +func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName, scheduled bool) error { var err error - c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate) - c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate) + stopCh := make(chan struct{}) ch := c.registerPodSubscriber(candidate) defer c.unregisterPodSubscriber(candidate) defer close(stopCh) - if err = 
c.patroni.Switchover(curMaster, candidate.Name); err == nil { + var scheduled_at string + if scheduled { + scheduled_at = c.GetSwitchoverSchedule() + } else { + c.logger.Debugf("switching over from %q to %q", curMaster.Name, candidate) + c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switching over from %q to %q", curMaster.Name, candidate) + scheduled_at = "" + } + + if err = c.patroni.Switchover(curMaster, candidate.Name, scheduled_at); err == nil { + if scheduled { + c.logger.Infof("switchover from %q to %q is scheduled at %s", curMaster.Name, candidate, scheduled_at) + return nil + } c.logger.Debugf("successfully switched over from %q to %q", curMaster.Name, candidate) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Successfully switched over from %q to %q", curMaster.Name, candidate) _, err = c.waitForPodLabel(ch, stopCh, nil) @@ -1750,6 +1829,9 @@ func (c *Cluster) Switchover(curMaster *v1.Pod, candidate spec.NamespacedName) e err = fmt.Errorf("could not get master pod label: %v", err) } } else { + if scheduled { + return fmt.Errorf("could not schedule switchover: %v", err) + } err = fmt.Errorf("could not switch over from %q to %q: %v", curMaster.Name, candidate, err) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Switchover", "Switchover from %q to %q FAILED: %v", curMaster.Name, candidate, err) } diff --git a/pkg/cluster/cluster_test.go b/pkg/cluster/cluster_test.go index 897ed6c0d..d78d4c92e 100644 --- a/pkg/cluster/cluster_test.go +++ b/pkg/cluster/cluster_test.go @@ -1341,14 +1341,21 @@ func TestCompareEnv(t *testing.T) { } } -func newService(ann map[string]string, svcT v1.ServiceType, lbSr []string) *v1.Service { +func newService( + annotations map[string]string, + svcType v1.ServiceType, + sourceRanges []string, + selector map[string]string, + policy v1.ServiceExternalTrafficPolicyType) *v1.Service { svc := &v1.Service{ Spec: v1.ServiceSpec{ - Type: svcT, - LoadBalancerSourceRanges: lbSr, + Selector: selector, + Type: svcType, + LoadBalancerSourceRanges: sourceRanges, + ExternalTrafficPolicy: policy, }, } - svc.Annotations = ann + svc.Annotations = annotations return svc } @@ -1365,13 +1372,18 @@ func TestCompareServices(t *testing.T) { }, } + defaultPolicy := v1.ServiceExternalTrafficPolicyTypeCluster + serviceWithOwnerReference := newService( map[string]string{ constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeClusterIP, - []string{"128.141.0.0/16", "137.138.0.0/16"}) + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, + defaultPolicy, + ) ownerRef := metav1.OwnerReference{ APIVersion: "acid.zalan.do/v1", @@ -1397,14 +1409,16 @@ func TestCompareServices(t *testing.T) { constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeClusterIP, - []string{"128.141.0.0/16", "137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), new: newService( map[string]string{ constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeClusterIP, - []string{"128.141.0.0/16", "137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), match: true, }, { @@ -1415,14 +1429,16 @@ func TestCompareServices(t *testing.T) { constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeClusterIP, - []string{"128.141.0.0/16", 
"137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), new: newService( map[string]string{ constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), match: false, reason: `new service's type "LoadBalancer" does not match the current one "ClusterIP"`, }, @@ -1434,14 +1450,16 @@ func TestCompareServices(t *testing.T) { constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), new: newService( map[string]string{ constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeLoadBalancer, - []string{"185.249.56.0/22"}), + []string{"185.249.56.0/22"}, + nil, defaultPolicy), match: false, reason: `new service's LoadBalancerSourceRange does not match the current one`, }, @@ -1453,14 +1471,16 @@ func TestCompareServices(t *testing.T) { constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeLoadBalancer, - []string{"128.141.0.0/16", "137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), new: newService( map[string]string{ constants.ZalandoDNSNameAnnotation: "clstr.acid.zalan.do", constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeLoadBalancer, - []string{}), + []string{}, + nil, defaultPolicy), match: false, reason: `new service's LoadBalancerSourceRange does not match the current one`, }, @@ -1472,10 +1492,39 @@ func TestCompareServices(t *testing.T) { constants.ElbTimeoutAnnotationName: constants.ElbTimeoutAnnotationValue, }, v1.ServiceTypeClusterIP, - []string{"128.141.0.0/16", "137.138.0.0/16"}), + []string{"128.141.0.0/16", "137.138.0.0/16"}, + nil, defaultPolicy), new: serviceWithOwnerReference, match: false, }, + { + about: "new service has a label selector", + current: newService( + map[string]string{}, + v1.ServiceTypeClusterIP, + []string{}, + nil, defaultPolicy), + new: newService( + map[string]string{}, + v1.ServiceTypeClusterIP, + []string{}, + map[string]string{"cluster-name": "clstr", "spilo-role": "master"}, defaultPolicy), + match: false, + }, + { + about: "services differ on external traffic policy", + current: newService( + map[string]string{}, + v1.ServiceTypeClusterIP, + []string{}, + nil, defaultPolicy), + new: newService( + map[string]string{}, + v1.ServiceTypeClusterIP, + []string{}, + nil, v1.ServiceExternalTrafficPolicyTypeLocal), + match: false, + }, } for _, tt := range tests { @@ -1680,12 +1729,20 @@ func TestCompareLogicalBackupJob(t *testing.T) { } } - match, reason := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob) - if match != tt.match { - t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), match, currentCronJob, desiredCronJob) - } else { - if !strings.HasPrefix(reason, tt.reason) { - t.Errorf("%s - expected reason prefix %s, found %s", t.Name(), tt.reason, reason) + cmp := cluster.compareLogicalBackupJob(currentCronJob, desiredCronJob) + if cmp.match != tt.match { + t.Errorf("%s - unexpected match result %t when comparing cronjobs %#v and %#v", t.Name(), cmp.match, currentCronJob, desiredCronJob) + 
} else if !cmp.match { + found := false + for _, reason := range cmp.reasons { + if strings.HasPrefix(reason, tt.reason) { + found = true + break + } + found = false + } + if !found { + t.Errorf("%s - expected reason prefix %s, not found in %#v", t.Name(), tt.reason, cmp.reasons) } } }) @@ -2057,3 +2114,91 @@ func TestCompareVolumeMounts(t *testing.T) { }) } } + +func TestGetSwitchoverSchedule(t *testing.T) { + now, _ := time.Parse(time.RFC3339, "2025-11-11T12:35:00Z") + + futureTimeStart := now.Add(1 * time.Hour) + futureWindowTimeStart := futureTimeStart.Format("15:04") + futureWindowTimeEnd := now.Add(2 * time.Hour).Format("15:04") + pastTimeStart := now.Add(-2 * time.Hour) + pastWindowTimeStart := pastTimeStart.Format("15:04") + pastWindowTimeEnd := now.Add(-1 * time.Hour).Format("15:04") + + tests := []struct { + name string + windows []acidv1.MaintenanceWindow + expected string + }{ + { + name: "everyday maintenance windows is later today", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime(futureWindowTimeStart), + EndTime: mustParseTime(futureWindowTimeEnd), + }, + }, + expected: futureTimeStart.Format("2006-01-02T15:04+00"), + }, + { + name: "everyday maintenance window is tomorrow", + windows: []acidv1.MaintenanceWindow{ + { + Everyday: true, + StartTime: mustParseTime(pastWindowTimeStart), + EndTime: mustParseTime(pastWindowTimeEnd), + }, + }, + expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"), + }, + { + name: "weekday maintenance windows is later today", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(futureWindowTimeStart), + EndTime: mustParseTime(futureWindowTimeEnd), + }, + }, + expected: futureTimeStart.Format("2006-01-02T15:04+00"), + }, + { + name: "weekday maintenance windows is passed for today", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.Weekday(), + StartTime: mustParseTime(pastWindowTimeStart), + EndTime: mustParseTime(pastWindowTimeEnd), + }, + }, + expected: pastTimeStart.AddDate(0, 0, 7).Format("2006-01-02T15:04+00"), + }, + { + name: "choose the earliest window", + windows: []acidv1.MaintenanceWindow{ + { + Weekday: now.AddDate(0, 0, 2).Weekday(), + StartTime: mustParseTime(futureWindowTimeStart), + EndTime: mustParseTime(futureWindowTimeEnd), + }, + { + Everyday: true, + StartTime: mustParseTime(pastWindowTimeStart), + EndTime: mustParseTime(pastWindowTimeEnd), + }, + }, + expected: pastTimeStart.AddDate(0, 0, 1).Format("2006-01-02T15:04+00"), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cluster.Spec.MaintenanceWindows = tt.windows + schedule := cluster.getSwitchoverScheduleAtTime(now) + if schedule != tt.expected { + t.Errorf("Expected GetSwitchoverSchedule to return %s, returned: %s", tt.expected, schedule) + } + }) + } +} diff --git a/pkg/cluster/connection_pooler.go b/pkg/cluster/connection_pooler.go index 6cd46f745..ac4ce67d8 100644 --- a/pkg/cluster/connection_pooler.go +++ b/pkg/cluster/connection_pooler.go @@ -2,6 +2,7 @@ package cluster import ( "context" + "encoding/json" "fmt" "reflect" "strings" @@ -977,6 +978,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql err error ) + updatedPodAnnotations := map[string]*string{} syncReason := make([]string, 0) deployment, err = c.KubeClient. Deployments(c.Namespace). 
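[Editor's note] The map[string]*string used for updatedPodAnnotations in the next hunk is deliberate: when the patch body is marshalled, a nil entry becomes JSON null, and a strategic merge patch deletes a key whose value is null, whereas simply omitting the key would leave a stale annotation on the live object. A self-contained sketch of the patch body being built (annotation names are illustrative; the real code targets the pooler deployment's spec.template.metadata.annotations):

package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// nil pointer -> serialized as null -> key is deleted on merge;
	// non-nil pointer -> key is created or updated.
	annotations := map[string]*string{}
	annotations["deprecated-annotation"] = nil
	kept := "v2"
	annotations["kept-annotation"] = &kept

	patch, err := json.Marshal(map[string]any{
		"spec": map[string]any{
			"template": map[string]any{
				"metadata": map[string]any{"annotations": annotations},
			},
		},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
	// {"spec":{"template":{"metadata":{"annotations":{"deprecated-annotation":null,"kept-annotation":"v2"}}}}}
}

The nested map literal in the hunk below spells out the same {"spec":{"template":{"metadata":{"annotations":…}}}} shape with explicit map types.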
@@ -1038,9 +1040,27 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql } newPodAnnotations := c.annotationsSet(c.generatePodAnnotations(&c.Spec)) - if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations); changed { + deletedPodAnnotations := []string{} + if changed, reason := c.compareAnnotations(deployment.Spec.Template.Annotations, newPodAnnotations, &deletedPodAnnotations); changed { specSync = true syncReason = append(syncReason, []string{"new connection pooler's pod template annotations do not match the current ones: " + reason}...) + + for _, anno := range deletedPodAnnotations { + updatedPodAnnotations[anno] = nil + } + templateMetadataReq := map[string]map[string]map[string]map[string]map[string]*string{ + "spec": {"template": {"metadata": {"annotations": updatedPodAnnotations}}}} + patch, err := json.Marshal(templateMetadataReq) + if err != nil { + return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pod template: %v", role, err) + } + deployment, err = c.KubeClient.Deployments(c.Namespace).Patch(context.TODO(), + deployment.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + c.logger.Errorf("failed to patch %s connection pooler's pod template: %v", role, err) + return nil, err + } + deployment.Spec.Template.Annotations = newPodAnnotations } @@ -1064,7 +1084,7 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql } newAnnotations := c.AnnotationsToPropagate(c.annotationsSet(nil)) // including the downscaling annotations - if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations); changed { + if changed, _ := c.compareAnnotations(deployment.Annotations, newAnnotations, nil); changed { deployment, err = patchConnectionPoolerAnnotations(c.KubeClient, deployment, newAnnotations) if err != nil { return nil, err @@ -1098,14 +1118,20 @@ func (c *Cluster) syncConnectionPoolerWorker(oldSpec, newSpec *acidv1.Postgresql if err != nil { return nil, fmt.Errorf("could not delete pooler pod: %v", err) } - } else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations); changed { - patchData, err := metaAnnotationsPatch(deployment.Spec.Template.Annotations) - if err != nil { - return nil, fmt.Errorf("could not form patch for pooler's pod annotations: %v", err) + } else if changed, _ := c.compareAnnotations(pod.Annotations, deployment.Spec.Template.Annotations, nil); changed { + metadataReq := map[string]map[string]map[string]*string{"metadata": {}} + + for anno, val := range deployment.Spec.Template.Annotations { + updatedPodAnnotations[anno] = &val } - _, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + metadataReq["metadata"]["annotations"] = updatedPodAnnotations + patch, err := json.Marshal(metadataReq) if err != nil { - return nil, fmt.Errorf("could not patch annotations for pooler's pod %q: %v", pod.Name, err) + return nil, fmt.Errorf("could not marshal ObjectMeta for %s connection pooler's pods: %v", role, err) + } + _, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) + if err != nil { + return nil, fmt.Errorf("could not patch annotations for %s connection pooler's pod %q: %v", role, pod.Name, err) } } } diff --git a/pkg/cluster/database.go b/pkg/cluster/database.go index aac877bcf..56b5f3638 
100644 --- a/pkg/cluster/database.go +++ b/pkg/cluster/database.go @@ -281,9 +281,23 @@ func findUsersFromRotation(rotatedUsers []string, db *sql.DB) (map[string]string return extraUsers, nil } -func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error { +func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string) error { c.setProcessName("checking for rotated users to remove from the database due to configured retention") - extraUsers, err := findUsersFromRotation(rotatedUsers, db) + + err := c.initDbConn() + if err != nil { + return fmt.Errorf("could not init db connection: %v", err) + } + defer func() { + if c.connectionIsClosed() { + return + } + if err := c.closeDbConn(); err != nil { + c.logger.Errorf("could not close database connection after removing users exceeding configured retention interval: %v", err) + } + }() + + extraUsers, err := findUsersFromRotation(rotatedUsers, c.pgDb) if err != nil { return fmt.Errorf("error when querying for deprecated users from password rotation: %v", err) } @@ -304,7 +318,7 @@ func (c *Cluster) cleanupRotatedUsers(rotatedUsers []string, db *sql.DB) error { } if retentionDate.After(userCreationDate) { c.logger.Infof("dropping user %q due to configured days in password_rotation_user_retention", rotatedUser) - if err = users.DropPgUser(rotatedUser, db); err != nil { + if err = users.DropPgUser(rotatedUser, c.pgDb); err != nil { c.logger.Errorf("could not drop role %q: %v", rotatedUser, err) continue } diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index ff5536303..9bc39a9db 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -4,7 +4,9 @@ import ( "context" "encoding/json" "fmt" + "maps" "path" + "slices" "sort" "strings" @@ -12,19 +14,16 @@ import ( "github.com/sirupsen/logrus" appsv1 "k8s.io/api/apps/v1" + batchv1 "k8s.io/api/batch/v1" v1 "k8s.io/api/core/v1" policyv1 "k8s.io/api/policy/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/types" "k8s.io/apimachinery/pkg/util/intstr" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" - batchv1 "k8s.io/api/batch/v1" - "k8s.io/apimachinery/pkg/labels" - acidv1 "github.com/zalando/postgres-operator/pkg/apis/acid.zalan.do/v1" "github.com/zalando/postgres-operator/pkg/spec" "github.com/zalando/postgres-operator/pkg/util" @@ -109,10 +108,15 @@ func (c *Cluster) servicePort(role PostgresRole) int32 { return pgPort } -func (c *Cluster) podDisruptionBudgetName() string { +func (c *Cluster) PrimaryPodDisruptionBudgetName() string { return c.OpConfig.PDBNameFormat.Format("cluster", c.Name) } +func (c *Cluster) criticalOpPodDisruptionBudgetName() string { + pdbTemplate := config.StringTemplate("postgres-{cluster}-critical-op-pdb") + return pdbTemplate.Format("cluster", c.Name) +} + func makeDefaultResources(config *config.Config) acidv1.Resources { defaultRequests := acidv1.ResourceDescription{ @@ -166,7 +170,7 @@ func (c *Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e if isSmaller { msg = fmt.Sprintf("defined CPU limit %s for %q container is below required minimum %s and will be increased", cpuLimit.String(), constants.PostgresContainerName, minCPULimit) - c.logger.Warningf(msg) + c.logger.Warningf("%s", msg) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg) resources.Limits[v1.ResourceCPU], _ = resource.ParseQuantity(minCPULimit) } @@ -183,7 +187,7 @@ func (c 
*Cluster) enforceMinResourceLimits(resources *v1.ResourceRequirements) e if isSmaller { msg = fmt.Sprintf("defined memory limit %s for %q container is below required minimum %s and will be increased", memoryLimit.String(), constants.PostgresContainerName, minMemoryLimit) - c.logger.Warningf(msg) + c.logger.Warningf("%s", msg) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "ResourceLimits", msg) resources.Limits[v1.ResourceMemory], _ = resource.ParseQuantity(minMemoryLimit) } @@ -519,13 +523,14 @@ func (c *Cluster) nodeAffinity(nodeReadinessLabel map[string]string, nodeAffinit }, } } else { - if c.OpConfig.NodeReadinessLabelMerge == "OR" { + switch c.OpConfig.NodeReadinessLabelMerge { + case "OR": manifestTerms := nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms manifestTerms = append(manifestTerms, nodeReadinessSelectorTerm) nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution = &v1.NodeSelector{ NodeSelectorTerms: manifestTerms, } - } else if c.OpConfig.NodeReadinessLabelMerge == "AND" { + case "AND": for i, nodeSelectorTerm := range nodeAffinityCopy.RequiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms { manifestExpressions := nodeSelectorTerm.MatchExpressions manifestExpressions = append(manifestExpressions, matchExpressions...) @@ -1005,6 +1010,9 @@ func (c *Cluster) generateSpiloPodEnvVars( if c.patroniUsesKubernetes() { envVars = append(envVars, v1.EnvVar{Name: "DCS_ENABLE_KUBERNETES_API", Value: "true"}) + if c.OpConfig.EnablePodDisruptionBudget != nil && *c.OpConfig.EnablePodDisruptionBudget { + envVars = append(envVars, v1.EnvVar{Name: "KUBERNETES_BOOTSTRAP_LABELS", Value: "{\"critical-operation\":\"true\"}"}) + } } else { envVars = append(envVars, v1.EnvVar{Name: "ETCD_HOST", Value: c.OpConfig.EtcdHost}) } @@ -1290,11 +1298,14 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef return nil, fmt.Errorf("could not generate resource requirements: %v", err) } - if spec.InitContainers != nil && len(spec.InitContainers) > 0 { + if len(spec.InitContainers) > 0 { if c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) { c.logger.Warningf("initContainers specified but disabled in configuration - next statefulset creation would fail") } initContainers = spec.InitContainers + if err := c.validateContainers(initContainers); err != nil { + return nil, fmt.Errorf("invalid init containers: %v", err) + } } // backward compatible check for InitContainers @@ -1393,7 +1404,7 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef // generate container specs for sidecars specified in the cluster manifest clusterSpecificSidecars := []v1.Container{} - if spec.Sidecars != nil && len(spec.Sidecars) > 0 { + if len(spec.Sidecars) > 0 { // warn if sidecars are defined, but globally disabled (does not apply to globally defined sidecars) if c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) { c.logger.Warningf("sidecars specified but disabled in configuration - next statefulset creation would fail") @@ -1447,6 +1458,10 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef sidecarContainers = patchSidecarContainers(sidecarContainers, volumeMounts, c.OpConfig.SuperUsername, c.credentialSecretName(c.OpConfig.SuperUsername)) + if err := c.validateContainers(sidecarContainers); err != nil { + return nil, fmt.Errorf("invalid sidecar containers: %v", err) + } + tolerationSpec := tolerations(&spec.Tolerations, 
c.OpConfig.PodToleration) effectivePodPriorityClassName := util.Coalesce(spec.PodPriorityClassName, c.OpConfig.PodPriorityClassName) @@ -1497,11 +1512,12 @@ func (c *Cluster) generateStatefulSet(spec *acidv1.PostgresSpec) (*appsv1.Statef updateStrategy := appsv1.StatefulSetUpdateStrategy{Type: appsv1.OnDeleteStatefulSetStrategyType} var podManagementPolicy appsv1.PodManagementPolicyType - if c.OpConfig.PodManagementPolicy == "ordered_ready" { + switch c.OpConfig.PodManagementPolicy { + case "ordered_ready": podManagementPolicy = appsv1.OrderedReadyPodManagement - } else if c.OpConfig.PodManagementPolicy == "parallel" { + case "parallel": podManagementPolicy = appsv1.ParallelPodManagement - } else { + default: return nil, fmt.Errorf("could not set the pod management policy to the unknown value: %v", c.OpConfig.PodManagementPolicy) } @@ -1920,7 +1936,7 @@ func (c *Cluster) generateSingleUserSecret(pgUser spec.PgUser) *v1.Secret { // if secret lives in another namespace we cannot set ownerReferences var ownerReferences []metav1.OwnerReference - if c.Config.OpConfig.EnableCrossNamespaceSecret && strings.Contains(username, ".") { + if c.Config.OpConfig.EnableCrossNamespaceSecret && c.Postgresql.ObjectMeta.Namespace != pgUser.Namespace { ownerReferences = nil } else { ownerReferences = c.ownerReferences() @@ -2207,7 +2223,7 @@ func (c *Cluster) generateStandbyEnvironment(description *acidv1.StandbyDescript return result } -func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { +func (c *Cluster) generatePrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget { minAvailable := intstr.FromInt(1) pdbEnabled := c.OpConfig.EnablePodDisruptionBudget pdbMasterLabelSelector := c.OpConfig.PDBMasterLabelSelector @@ -2225,7 +2241,36 @@ func (c *Cluster) generatePodDisruptionBudget() *policyv1.PodDisruptionBudget { return &policyv1.PodDisruptionBudget{ ObjectMeta: metav1.ObjectMeta{ - Name: c.podDisruptionBudgetName(), + Name: c.PrimaryPodDisruptionBudgetName(), + Namespace: c.Namespace, + Labels: c.labelsSet(true), + Annotations: c.annotationsSet(nil), + OwnerReferences: c.ownerReferences(), + }, + Spec: policyv1.PodDisruptionBudgetSpec{ + MinAvailable: &minAvailable, + Selector: &metav1.LabelSelector{ + MatchLabels: labels, + }, + }, + } +} + +func (c *Cluster) generateCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget { + minAvailable := intstr.FromInt32(c.Spec.NumberOfInstances) + pdbEnabled := c.OpConfig.EnablePodDisruptionBudget + + // if PodDisruptionBudget is disabled or if there are no DB pods, set the budget to 0. 
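// [Editor's aside, not part of the patch] Worked example of this branch:
// with pod disruption budgets enabled and numberOfInstances = 3, the budget
// gets minAvailable = 3, so the eviction API refuses any voluntary
// disruption of pods matching the selector built below (cluster-name plus
// critical-operation=true). That label is only attached while a critical
// operation is in flight (Spilo applies it via the KUBERNETES_BOOTSTRAP_LABELS
// variable added earlier in this patch), so the budget is inert in normal
// operation. With PDBs disabled or zero instances, minAvailable = 0 turns
// the object into a no-op.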
+ if (pdbEnabled != nil && !(*pdbEnabled)) || c.Spec.NumberOfInstances <= 0 { + minAvailable = intstr.FromInt(0) + } + + labels := c.labelsSet(false) + labels["critical-operation"] = "true" + + return &policyv1.PodDisruptionBudget{ + ObjectMeta: metav1.ObjectMeta{ + Name: c.criticalOpPodDisruptionBudgetName(), Namespace: c.Namespace, Labels: c.labelsSet(true), Annotations: c.annotationsSet(nil), @@ -2554,3 +2599,15 @@ func ensurePath(file string, defaultDir string, defaultFile string) string { } return file } + +func (c *Cluster) validateContainers(containers []v1.Container) error { + for i, container := range containers { + if container.Name == "" { + return fmt.Errorf("container[%d]: name is required", i) + } + if container.Image == "" { + return fmt.Errorf("container '%v': image is required", container.Name) + } + } + return nil +} diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 612e4525a..6bd87366d 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -1935,7 +1935,8 @@ func TestAdditionalVolume(t *testing.T) { AdditionalVolumes: additionalVolumes, Sidecars: []acidv1.Sidecar{ { - Name: sidecarName, + Name: sidecarName, + DockerImage: "test-image", }, }, }, @@ -2163,10 +2164,12 @@ func TestSidecars(t *testing.T) { }, Sidecars: []acidv1.Sidecar{ { - Name: "cluster-specific-sidecar", + Name: "cluster-specific-sidecar", + DockerImage: "test-image", }, { - Name: "cluster-specific-sidecar-with-resources", + Name: "cluster-specific-sidecar-with-resources", + DockerImage: "test-image", Resources: &acidv1.Resources{ ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")}, ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")}, @@ -2201,7 +2204,8 @@ func TestSidecars(t *testing.T) { }, SidecarContainers: []v1.Container{ { - Name: "global-sidecar", + Name: "global-sidecar", + Image: "test-image", }, // will be replaced by a cluster specific sidecar with the same name { @@ -2271,6 +2275,7 @@ func TestSidecars(t *testing.T) { // cluster specific sidecar assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ Name: "cluster-specific-sidecar", + Image: "test-image", Env: env, Resources: generateKubernetesResources("200m", "500m", "0.7Gi", "1.3Gi"), ImagePullPolicy: v1.PullIfNotPresent, @@ -2297,6 +2302,7 @@ func TestSidecars(t *testing.T) { // global sidecar assert.Contains(t, s.Spec.Template.Spec.Containers, v1.Container{ Name: "global-sidecar", + Image: "test-image", Env: env, VolumeMounts: mounts, }) @@ -2325,6 +2331,180 @@ func TestSidecars(t *testing.T) { } +func TestContainerValidation(t *testing.T) { + testCases := []struct { + name string + spec acidv1.PostgresSpec + clusterConfig Config + expectedError string + }{ + { + name: "init container without image", + spec: acidv1.PostgresSpec{ + PostgresqlParam: acidv1.PostgresqlParam{ + PgVersion: "17", + }, + TeamID: "myapp", + NumberOfInstances: 1, + Volume: acidv1.Volume{ + Size: "1G", + }, + InitContainers: []v1.Container{ + { + Name: "invalid-initcontainer", + }, + }, + }, + clusterConfig: Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, + expectedError: "image is required", + }, + { + name: "sidecar without name", + spec: acidv1.PostgresSpec{ + PostgresqlParam: 
acidv1.PostgresqlParam{ + PgVersion: "17", + }, + TeamID: "myapp", + NumberOfInstances: 1, + Volume: acidv1.Volume{ + Size: "1G", + }, + }, + clusterConfig: Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + SidecarContainers: []v1.Container{ + { + Image: "test-image", + }, + }, + }, + }, + expectedError: "name is required", + }, + { + name: "sidecar without image", + spec: acidv1.PostgresSpec{ + PostgresqlParam: acidv1.PostgresqlParam{ + PgVersion: "17", + }, + TeamID: "myapp", + NumberOfInstances: 1, + Volume: acidv1.Volume{ + Size: "1G", + }, + Sidecars: []acidv1.Sidecar{ + { + Name: "invalid-sidecar", + }, + }, + }, + clusterConfig: Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, + expectedError: "image is required", + }, + { + name: "valid containers pass validation", + spec: acidv1.PostgresSpec{ + PostgresqlParam: acidv1.PostgresqlParam{ + PgVersion: "17", + }, + TeamID: "myapp", + NumberOfInstances: 1, + Volume: acidv1.Volume{ + Size: "1G", + }, + Sidecars: []acidv1.Sidecar{ + { + Name: "valid-sidecar", + DockerImage: "busybox:latest", + }, + }, + InitContainers: []v1.Container{ + { + Name: "valid-initcontainer", + Image: "alpine:latest", + }, + }, + }, + clusterConfig: Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + ProtectedRoles: []string{"admin"}, + Auth: config.Auth{ + SuperUsername: superUserName, + ReplicationUsername: replicationUserName, + }, + }, + }, + expectedError: "", + }, + { + name: "multiple invalid sidecars", + spec: acidv1.PostgresSpec{ + Sidecars: []acidv1.Sidecar{ + { + Name: "sidecar1", + }, + { + Name: "sidecar2", + }, + }, + }, + expectedError: "image is required", + }, + { + name: "empty container name and image", + spec: acidv1.PostgresSpec{ + InitContainers: []v1.Container{ + { + Name: "", + Image: "", + }, + }, + }, + expectedError: "name is required", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + cluster := New(tc.clusterConfig, k8sutil.KubernetesClient{}, acidv1.Postgresql{}, logger, eventRecorder) + + _, err := cluster.generateStatefulSet(&tc.spec) + + if tc.expectedError != "" { + assert.Error(t, err) + assert.Contains(t, err.Error(), tc.expectedError) + } else { + assert.NoError(t, err) + } + }) + } +} + func TestGeneratePodDisruptionBudget(t *testing.T) { testName := "Test PodDisruptionBudget spec generation" @@ -2349,22 +2529,34 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { } } - testLabelsAndSelectors := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { - masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector - if podDisruptionBudget.ObjectMeta.Namespace != "myapp" { - return fmt.Errorf("Object Namespace incorrect.") - } - if !reflect.DeepEqual(podDisruptionBudget.Labels, map[string]string{"team": "myapp", "cluster-name": "myapp-database"}) { - return fmt.Errorf("Labels incorrect.") - } - if !masterLabelSelectorDisabled && - !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, &metav1.LabelSelector{ - MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}}) { + testLabelsAndSelectors := func(isPrimary 
bool) func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + return func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { + masterLabelSelectorDisabled := cluster.OpConfig.PDBMasterLabelSelector != nil && !*cluster.OpConfig.PDBMasterLabelSelector + if podDisruptionBudget.ObjectMeta.Namespace != "myapp" { + return fmt.Errorf("Object Namespace incorrect.") + } + expectedLabels := map[string]string{"team": "myapp", "cluster-name": "myapp-database"} + if !reflect.DeepEqual(podDisruptionBudget.Labels, expectedLabels) { + return fmt.Errorf("Labels incorrect, got %#v, expected %#v", podDisruptionBudget.Labels, expectedLabels) + } + if !masterLabelSelectorDisabled { + if isPrimary { + expectedLabels := &metav1.LabelSelector{ + MatchLabels: map[string]string{"spilo-role": "master", "cluster-name": "myapp-database"}} + if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) { + return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels) + } + } else { + expectedLabels := &metav1.LabelSelector{ + MatchLabels: map[string]string{"cluster-name": "myapp-database", "critical-operation": "true"}} + if !reflect.DeepEqual(podDisruptionBudget.Spec.Selector, expectedLabels) { + return fmt.Errorf("MatchLabels incorrect, got %#v, expected %#v", podDisruptionBudget.Spec.Selector, expectedLabels) + } + } + } - return fmt.Errorf("MatchLabels incorrect.") + return nil } - - return nil } testPodDisruptionBudgetOwnerReference := func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error { @@ -2400,7 +2592,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { testPodDisruptionBudgetOwnerReference, hasName("postgres-myapp-database-pdb"), hasMinAvailable(1), - testLabelsAndSelectors, + testLabelsAndSelectors(true), }, }, { @@ -2417,7 +2609,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { testPodDisruptionBudgetOwnerReference, hasName("postgres-myapp-database-pdb"), hasMinAvailable(0), - testLabelsAndSelectors, + testLabelsAndSelectors(true), }, }, { @@ -2434,7 +2626,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { testPodDisruptionBudgetOwnerReference, hasName("postgres-myapp-database-pdb"), hasMinAvailable(0), - testLabelsAndSelectors, + testLabelsAndSelectors(true), }, }, { @@ -2451,7 +2643,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { testPodDisruptionBudgetOwnerReference, hasName("postgres-myapp-database-databass-budget"), hasMinAvailable(1), - testLabelsAndSelectors, + testLabelsAndSelectors(true), }, }, { @@ -2468,7 +2660,7 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { testPodDisruptionBudgetOwnerReference, hasName("postgres-myapp-database-pdb"), hasMinAvailable(1), - testLabelsAndSelectors, + testLabelsAndSelectors(true), }, }, { @@ -2485,13 +2677,99 @@ func TestGeneratePodDisruptionBudget(t *testing.T) { testPodDisruptionBudgetOwnerReference, hasName("postgres-myapp-database-pdb"), hasMinAvailable(1), - testLabelsAndSelectors, + testLabelsAndSelectors(true), }, }, } for _, tt := range tests { - result := tt.spec.generatePodDisruptionBudget() + result := tt.spec.generatePrimaryPodDisruptionBudget() + for _, check := range tt.check { + err := check(tt.spec, result) + if err != nil { + t.Errorf("%s [%s]: PodDisruptionBudget spec is incorrect, %+v", + testName, tt.scenario, err) + } + } + } + + testCriticalOp := []struct { + scenario string + spec *Cluster + check []func(cluster *Cluster, podDisruptionBudget 
*policyv1.PodDisruptionBudget) error + }{ + { + scenario: "With multiple instances", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(3), + testLabelsAndSelectors(false), + }, + }, + { + scenario: "With zero instances", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb"}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 0}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(0), + testLabelsAndSelectors(false), + }, + }, + { + scenario: "With PodDisruptionBudget disabled", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role"}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.False()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(0), + testLabelsAndSelectors(false), + }, + }, + { + scenario: "With OwnerReference enabled", + spec: New( + Config{OpConfig: config.Config{Resources: config.Resources{ClusterNameLabel: "cluster-name", PodRoleLabel: "spilo-role", EnableOwnerReferences: util.True()}, PDBNameFormat: "postgres-{cluster}-pdb", EnablePodDisruptionBudget: util.True()}}, + k8sutil.KubernetesClient{}, + acidv1.Postgresql{ + ObjectMeta: metav1.ObjectMeta{Name: "myapp-database", Namespace: "myapp"}, + Spec: acidv1.PostgresSpec{TeamID: "myapp", NumberOfInstances: 3}}, + logger, + eventRecorder), + check: []func(cluster *Cluster, podDisruptionBudget *policyv1.PodDisruptionBudget) error{ + testPodDisruptionBudgetOwnerReference, + hasName("postgres-myapp-database-critical-op-pdb"), + hasMinAvailable(3), + testLabelsAndSelectors(false), + }, + }, + } + + for _, tt := range testCriticalOp { + result := tt.spec.generateCriticalOpPodDisruptionBudget() for _, check := range tt.check { err := check(tt.spec, result) if err != nil { @@ -2520,7 +2798,8 @@ func TestGenerateService(t *testing.T) { Name: "cluster-specific-sidecar", }, { - Name: "cluster-specific-sidecar-with-resources", + Name: "cluster-specific-sidecar-with-resources", + DockerImage: "test-image", Resources: &acidv1.Resources{ ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("210m"), Memory: k8sutil.StringToPointer("0.8Gi")}, 
ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("510m"), Memory: k8sutil.StringToPointer("1.4Gi")}, @@ -2830,6 +3109,7 @@ func TestGenerateResourceRequirements(t *testing.T) { namespace := "default" clusterNameLabel := "cluster-name" sidecarName := "postgres-exporter" + dockerImage := "test-image" // enforceMinResourceLimits will be called 2 times emitting 4 events (2x cpu, 2x memory raise) // enforceMaxResourceRequests will be called 4 times emitting 6 events (2x cpu, 4x memory cap) @@ -2895,7 +3175,8 @@ func TestGenerateResourceRequirements(t *testing.T) { Spec: acidv1.PostgresSpec{ Sidecars: []acidv1.Sidecar{ { - Name: sidecarName, + Name: sidecarName, + DockerImage: dockerImage, }, }, TeamID: "acid", @@ -3134,7 +3415,8 @@ func TestGenerateResourceRequirements(t *testing.T) { Spec: acidv1.PostgresSpec{ Sidecars: []acidv1.Sidecar{ { - Name: sidecarName, + Name: sidecarName, + DockerImage: dockerImage, Resources: &acidv1.Resources{ ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, @@ -3223,7 +3505,8 @@ func TestGenerateResourceRequirements(t *testing.T) { Spec: acidv1.PostgresSpec{ Sidecars: []acidv1.Sidecar{ { - Name: sidecarName, + Name: sidecarName, + DockerImage: dockerImage, Resources: &acidv1.Resources{ ResourceRequests: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("10m"), Memory: k8sutil.StringToPointer("10Mi")}, ResourceLimits: acidv1.ResourceDescription{CPU: k8sutil.StringToPointer("100m"), Memory: k8sutil.StringToPointer("100Mi")}, diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go index a4ae5f81b..b80cbaa09 100644 --- a/pkg/cluster/majorversionupgrade.go +++ b/pkg/cluster/majorversionupgrade.go @@ -16,7 +16,6 @@ import ( // VersionMap Map of version numbers var VersionMap = map[string]int{ - "12": 120000, "13": 130000, "14": 140000, "15": 150000, @@ -106,6 +105,22 @@ func (c *Cluster) removeFailuresAnnotation() error { return nil } +func (c *Cluster) criticalOperationLabel(pods []v1.Pod, value *string) error { + metadataReq := map[string]map[string]map[string]*string{"metadata": {"labels": {"critical-operation": value}}} + + patchReq, err := json.Marshal(metadataReq) + if err != nil { + return fmt.Errorf("could not marshal ObjectMeta: %v", err) + } + for _, pod := range pods { + _, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patchReq, metav1.PatchOptions{}) + if err != nil { + return err + } + } + return nil +} + /* Execute upgrade when mode is set to manual or full or when the owning team is allowed for upgrade (and mode is "off"). @@ -129,17 +144,13 @@ func (c *Cluster) majorVersionUpgrade() error { return nil } - if !isInMainternanceWindow(c.Spec.MaintenanceWindows) { - c.logger.Infof("skipping major version upgrade, not in maintenance window") - return nil - } - pods, err := c.listPods() if err != nil { return err } allRunning := true + isStandbyCluster := false var masterPod *v1.Pod @@ -147,8 +158,9 @@ func (c *Cluster) majorVersionUpgrade() error { ps, _ := c.patroni.GetMemberData(&pod) if ps.Role == "standby_leader" { - c.logger.Errorf("skipping major version upgrade for %s/%s standby cluster. 
Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name) - return nil + isStandbyCluster = true + c.currentMajorVersion = ps.ServerVersion + break } if ps.State != "running" { @@ -175,6 +187,9 @@ func (c *Cluster) majorVersionUpgrade() error { } c.logger.Infof("recheck cluster version is already up to date. current: %d, min desired: %d", c.currentMajorVersion, desiredVersion) return nil + } else if isStandbyCluster { + c.logger.Warnf("skipping major version upgrade for %s/%s standby cluster. Re-deploy standby cluster with the required Postgres version specified", c.Namespace, c.Name) + return nil } if _, exists := c.ObjectMeta.Annotations[majorVersionUpgradeFailureAnnotation]; exists { @@ -182,6 +197,11 @@ func (c *Cluster) majorVersionUpgrade() error { return nil } + if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) { + c.logger.Infof("skipping major version upgrade, not in maintenance window") + return nil + } + members, err := c.patroni.GetClusterMembers(masterPod) if err != nil { c.logger.Error("could not get cluster members data from Patroni API, skipping major version upgrade") @@ -216,9 +236,20 @@ func (c *Cluster) majorVersionUpgrade() error { isUpgradeSuccess := true numberOfPods := len(pods) - if allRunning && masterPod != nil { + if allRunning { c.logger.Infof("healthy cluster ready to upgrade, current: %d desired: %d", c.currentMajorVersion, desiredVersion) if c.currentMajorVersion < desiredVersion { + defer func() error { + if err = c.criticalOperationLabel(pods, nil); err != nil { + return fmt.Errorf("failed to remove critical-operation label: %s", err) + } + return nil + }() + val := "true" + if err = c.criticalOperationLabel(pods, &val); err != nil { + return fmt.Errorf("failed to assign critical-operation label: %s", err) + } + podName := &spec.NamespacedName{Namespace: masterPod.Namespace, Name: masterPod.Name} c.logger.Infof("triggering major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeNormal, "Major Version Upgrade", "starting major version upgrade on pod %s of %d pods", masterPod.Name, numberOfPods) @@ -245,7 +276,7 @@ func (c *Cluster) majorVersionUpgrade() error { isUpgradeSuccess = false c.annotatePostgresResource(isUpgradeSuccess) c.eventRecorder.Eventf(c.GetReference(), v1.EventTypeWarning, "Major Version Upgrade", "upgrade from %d to %d FAILED: %v", c.currentMajorVersion, desiredVersion, scriptErrMsg) - return fmt.Errorf(scriptErrMsg) + return fmt.Errorf("%s", scriptErrMsg) } c.annotatePostgresResource(isUpgradeSuccess) diff --git a/pkg/cluster/pod.go b/pkg/cluster/pod.go index bd2172c18..12a18b9b3 100644 --- a/pkg/cluster/pod.go +++ b/pkg/cluster/pod.go @@ -3,12 +3,11 @@ package cluster import ( "context" "fmt" + "slices" "sort" "strconv" "time" - "golang.org/x/exp/slices" - appsv1 "k8s.io/api/apps/v1" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -280,11 +279,16 @@ func (c *Cluster) MigrateMasterPod(podName spec.NamespacedName) error { return fmt.Errorf("could not move pod: %v", err) } + scheduleSwitchover := false + if !isInMaintenanceWindow(c.Spec.MaintenanceWindows) { + c.logger.Infof("postponing switchover, not in maintenance window") + scheduleSwitchover = true + } err = retryutil.Retry(1*time.Minute, 5*time.Minute, func() (bool, error) { - err := c.Switchover(oldMaster, masterCandidateName) + err := c.Switchover(oldMaster, masterCandidateName, scheduleSwitchover) if err != nil { - c.logger.Errorf("could not 
failover to pod %q: %v", masterCandidateName, err) + c.logger.Errorf("could not switchover to pod %q: %v", masterCandidateName, err) return false, nil } return true, nil @@ -428,9 +432,10 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp } newRole := PostgresRole(newPod.Labels[c.OpConfig.PodRoleLabel]) - if newRole == Replica { + switch newRole { + case Replica: replicas = append(replicas, util.NameFromMeta(pod.ObjectMeta)) - } else if newRole == Master { + case Master: newMasterPod = newPod } } @@ -445,7 +450,7 @@ func (c *Cluster) recreatePods(pods []v1.Pod, switchoverCandidates []spec.Namesp // do not recreate master now so it will keep the update flag and switchover will be retried on next sync return fmt.Errorf("skipping switchover: %v", err) } - if err := c.Switchover(masterPod, masterCandidate); err != nil { + if err := c.Switchover(masterPod, masterCandidate, false); err != nil { return fmt.Errorf("could not perform switch over: %v", err) } } else if newMasterPod == nil && len(replicas) == 0 { diff --git a/pkg/cluster/resources.go b/pkg/cluster/resources.go index 3f47328ee..ed3eb3d75 100644 --- a/pkg/cluster/resources.go +++ b/pkg/cluster/resources.go @@ -23,8 +23,13 @@ const ( ) func (c *Cluster) listResources() error { - if c.PodDisruptionBudget != nil { - c.logger.Infof("found pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta), c.PodDisruptionBudget.UID) + if c.PrimaryPodDisruptionBudget != nil { + c.logger.Infof("found primary pod disruption budget: %q (uid: %q)", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta), c.PrimaryPodDisruptionBudget.UID) + } + + if c.CriticalOpPodDisruptionBudget != nil { + c.logger.Infof("found pod disruption budget for critical operations: %q (uid: %q)", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta), c.CriticalOpPodDisruptionBudget.UID) + } if c.Statefulset != nil { @@ -89,12 +94,12 @@ func (c *Cluster) listResources() error { func (c *Cluster) createStatefulSet() (*appsv1.StatefulSet, error) { c.setProcessName("creating statefulset") // check if it's allowed that spec contains initContainers - if c.Spec.InitContainers != nil && len(c.Spec.InitContainers) > 0 && + if len(c.Spec.InitContainers) > 0 && c.OpConfig.EnableInitContainers != nil && !(*c.OpConfig.EnableInitContainers) { return nil, fmt.Errorf("initContainers specified but disabled in configuration") } // check if it's allowed that spec contains sidecars - if c.Spec.Sidecars != nil && len(c.Spec.Sidecars) > 0 && + if len(c.Spec.Sidecars) > 0 && c.OpConfig.EnableSidecars != nil && !(*c.OpConfig.EnableSidecars) { return nil, fmt.Errorf("sidecar containers specified but disabled in configuration") } @@ -162,8 +167,8 @@ func (c *Cluster) preScaleDown(newStatefulSet *appsv1.StatefulSet) error { return fmt.Errorf("pod %q does not belong to cluster", podName) } - if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name); err != nil { - return fmt.Errorf("could not failover: %v", err) + if err := c.patroni.Switchover(&masterPod[0], masterCandidatePod.Name, ""); err != nil { + return fmt.Errorf("could not switchover: %v", err) } return nil @@ -329,7 +334,7 @@ func (c *Cluster) updateService(role PostgresRole, oldService *v1.Service, newSe } } - if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations); changed { + if changed, _ := c.compareAnnotations(oldService.Annotations, newService.Annotations, nil); changed { patchData, err := 
metaAnnotationsPatch(newService.Annotations) if err != nil { return nil, fmt.Errorf("could not form patch for service %q annotations: %v", oldService.Name, err) @@ -417,59 +422,128 @@ func (c *Cluster) generateEndpointSubsets(role PostgresRole) []v1.EndpointSubset return result } -func (c *Cluster) createPodDisruptionBudget() (*policyv1.PodDisruptionBudget, error) { - podDisruptionBudgetSpec := c.generatePodDisruptionBudget() +func (c *Cluster) createPrimaryPodDisruptionBudget() error { + c.logger.Debug("creating primary pod disruption budget") + if c.PrimaryPodDisruptionBudget != nil { + c.logger.Warning("primary pod disruption budget already exists in the cluster") + return nil + } + + podDisruptionBudgetSpec := c.generatePrimaryPodDisruptionBudget() podDisruptionBudget, err := c.KubeClient. PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace). Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{}) if err != nil { - return nil, err + return err } - c.PodDisruptionBudget = podDisruptionBudget + c.logger.Infof("primary pod disruption budget %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta)) + c.PrimaryPodDisruptionBudget = podDisruptionBudget - return podDisruptionBudget, nil + return nil } -func (c *Cluster) updatePodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { - if c.PodDisruptionBudget == nil { - return fmt.Errorf("there is no pod disruption budget in the cluster") +func (c *Cluster) createCriticalOpPodDisruptionBudget() error { + c.logger.Debug("creating pod disruption budget for critical operations") + if c.CriticalOpPodDisruptionBudget != nil { + c.logger.Warning("pod disruption budget for critical operations already exists in the cluster") + return nil } - if err := c.deletePodDisruptionBudget(); err != nil { - return fmt.Errorf("could not delete pod disruption budget: %v", err) + podDisruptionBudgetSpec := c.generateCriticalOpPodDisruptionBudget() + podDisruptionBudget, err := c.KubeClient. + PodDisruptionBudgets(podDisruptionBudgetSpec.Namespace). + Create(context.TODO(), podDisruptionBudgetSpec, metav1.CreateOptions{}) + + if err != nil { + return err + } + c.logger.Infof("pod disruption budget for critical operations %q has been successfully created", util.NameFromMeta(podDisruptionBudget.ObjectMeta)) + c.CriticalOpPodDisruptionBudget = podDisruptionBudget + + return nil +} + +func (c *Cluster) createPodDisruptionBudgets() error { + errors := make([]string, 0) + + err := c.createPrimaryPodDisruptionBudget() + if err != nil { + errors = append(errors, fmt.Sprintf("could not create primary pod disruption budget: %v", err)) + } + + err = c.createCriticalOpPodDisruptionBudget() + if err != nil { + errors = append(errors, fmt.Sprintf("could not create pod disruption budget for critical operations: %v", err)) + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } + return nil +} + +func (c *Cluster) updatePrimaryPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { + c.logger.Debug("updating primary pod disruption budget") + if c.PrimaryPodDisruptionBudget == nil { + return fmt.Errorf("there is no primary pod disruption budget in the cluster") + } + + if err := c.deletePrimaryPodDisruptionBudget(); err != nil { + return fmt.Errorf("could not delete primary pod disruption budget: %v", err) } newPdb, err := c.KubeClient. PodDisruptionBudgets(pdb.Namespace). 
Create(context.TODO(), pdb, metav1.CreateOptions{}) if err != nil { - return fmt.Errorf("could not create pod disruption budget: %v", err) + return fmt.Errorf("could not create primary pod disruption budget: %v", err) } - c.PodDisruptionBudget = newPdb + c.PrimaryPodDisruptionBudget = newPdb return nil } -func (c *Cluster) deletePodDisruptionBudget() error { - c.logger.Debug("deleting pod disruption budget") - if c.PodDisruptionBudget == nil { - c.logger.Debug("there is no pod disruption budget in the cluster") +func (c *Cluster) updateCriticalOpPodDisruptionBudget(pdb *policyv1.PodDisruptionBudget) error { + c.logger.Debug("updating pod disruption budget for critical operations") + if c.CriticalOpPodDisruptionBudget == nil { + return fmt.Errorf("there is no pod disruption budget for critical operations in the cluster") + } + + if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil { + return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err) + } + + newPdb, err := c.KubeClient. + PodDisruptionBudgets(pdb.Namespace). + Create(context.TODO(), pdb, metav1.CreateOptions{}) + if err != nil { + return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err) + } + c.CriticalOpPodDisruptionBudget = newPdb + + return nil +} + +func (c *Cluster) deletePrimaryPodDisruptionBudget() error { + c.logger.Debug("deleting primary pod disruption budget") + if c.PrimaryPodDisruptionBudget == nil { + c.logger.Debug("there is no primary pod disruption budget in the cluster") return nil } - pdbName := util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta) + pdbName := util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta) err := c.KubeClient. - PodDisruptionBudgets(c.PodDisruptionBudget.Namespace). - Delete(context.TODO(), c.PodDisruptionBudget.Name, c.deleteOptions) + PodDisruptionBudgets(c.PrimaryPodDisruptionBudget.Namespace). + Delete(context.TODO(), c.PrimaryPodDisruptionBudget.Name, c.deleteOptions) if k8sutil.ResourceNotFound(err) { - c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)) + c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)) } else if err != nil { - return fmt.Errorf("could not delete PodDisruptionBudget: %v", err) + return fmt.Errorf("could not delete primary pod disruption budget: %v", err) } - c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PodDisruptionBudget.ObjectMeta)) - c.PodDisruptionBudget = nil + c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.PrimaryPodDisruptionBudget.ObjectMeta)) + c.PrimaryPodDisruptionBudget = nil err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout, func() (bool, error) { @@ -483,12 +557,67 @@ func (c *Cluster) deletePodDisruptionBudget() error { return false, err2 }) if err != nil { - return fmt.Errorf("could not delete pod disruption budget: %v", err) + return fmt.Errorf("could not delete primary pod disruption budget: %v", err) } return nil } +func (c *Cluster) deleteCriticalOpPodDisruptionBudget() error { + c.logger.Debug("deleting pod disruption budget for critical operations") + if c.CriticalOpPodDisruptionBudget == nil { + c.logger.Debug("there is no pod disruption budget for critical operations in the cluster") + return nil + } + + pdbName := util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta) + err := c.KubeClient. 
+		PodDisruptionBudgets(c.CriticalOpPodDisruptionBudget.Namespace).
+		Delete(context.TODO(), c.CriticalOpPodDisruptionBudget.Name, c.deleteOptions)
+	if k8sutil.ResourceNotFound(err) {
+		c.logger.Debugf("PodDisruptionBudget %q has already been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta))
+	} else if err != nil {
+		return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
+	}
+
+	c.logger.Infof("pod disruption budget %q has been deleted", util.NameFromMeta(c.CriticalOpPodDisruptionBudget.ObjectMeta))
+	c.CriticalOpPodDisruptionBudget = nil
+
+	err = retryutil.Retry(c.OpConfig.ResourceCheckInterval, c.OpConfig.ResourceCheckTimeout,
+		func() (bool, error) {
+			_, err2 := c.KubeClient.PodDisruptionBudgets(pdbName.Namespace).Get(context.TODO(), pdbName.Name, metav1.GetOptions{})
+			if err2 == nil {
+				return false, nil
+			}
+			if k8sutil.ResourceNotFound(err2) {
+				return true, nil
+			}
+			return false, err2
+		})
+	if err != nil {
+		return fmt.Errorf("could not delete pod disruption budget for critical operations: %v", err)
+	}
+
+	return nil
+}
+
+func (c *Cluster) deletePodDisruptionBudgets() error {
+	errors := make([]string, 0)
+
+	if err := c.deletePrimaryPodDisruptionBudget(); err != nil {
+		errors = append(errors, fmt.Sprintf("%v", err))
+	}
+
+	if err := c.deleteCriticalOpPodDisruptionBudget(); err != nil {
+		errors = append(errors, fmt.Sprintf("%v", err))
+	}
+
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, `', '`))
+	}
+	return nil
+}
+
 func (c *Cluster) deleteEndpoint(role PostgresRole) error {
 	c.setProcessName("deleting endpoint")
 	c.logger.Debugf("deleting %s endpoint", role)
@@ -705,7 +834,12 @@ func (c *Cluster) GetStatefulSet() *appsv1.StatefulSet {
 	return c.Statefulset
 }
 
-// GetPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget
-func (c *Cluster) GetPodDisruptionBudget() *policyv1.PodDisruptionBudget {
-	return c.PodDisruptionBudget
+// GetPrimaryPodDisruptionBudget returns cluster's primary kubernetes PodDisruptionBudget
+func (c *Cluster) GetPrimaryPodDisruptionBudget() *policyv1.PodDisruptionBudget {
+	return c.PrimaryPodDisruptionBudget
+}
+
+// GetCriticalOpPodDisruptionBudget returns cluster's kubernetes PodDisruptionBudget for critical operations
+func (c *Cluster) GetCriticalOpPodDisruptionBudget() *policyv1.PodDisruptionBudget {
+	return c.CriticalOpPodDisruptionBudget
 }
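Both budget flavors are handled by deliberately symmetric helpers, and the plural wrappers only aggregate failures so a problem with one budget cannot mask the other. A minimal, runnable sketch of that aggregation idiom, with hypothetical step functions standing in for the operator's methods:

	package main

	import (
		"fmt"
		"strings"
	)

	// aggregate runs every step and reports all failures as one joined error,
	// mirroring createPodDisruptionBudgets/deletePodDisruptionBudgets above.
	func aggregate(steps []func() error) error {
		errs := make([]string, 0)
		for _, step := range steps {
			if err := step(); err != nil {
				errs = append(errs, fmt.Sprintf("%v", err))
			}
		}
		if len(errs) > 0 {
			return fmt.Errorf("%v", strings.Join(errs, `', '`))
		}
		return nil
	}

	func main() {
		primary := func() error { return nil }                                 // hypothetical primary PDB step
		criticalOp := func() error { return fmt.Errorf("pdb already exists") } // hypothetical critical-op PDB step
		fmt.Println(aggregate([]func() error{primary, criticalOp}))            // prints: pdb already exists
	}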
diff --git a/pkg/cluster/streams.go b/pkg/cluster/streams.go
index 9e2c7482a..bf9be3fb4 100644
--- a/pkg/cluster/streams.go
+++ b/pkg/cluster/streams.go
@@ -114,10 +114,10 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
 	}
 
 	for slotName, slotAndPublication := range databaseSlotsList {
-		tables := slotAndPublication.Publication
-		tableNames := make([]string, len(tables))
+		newTables := slotAndPublication.Publication
+		tableNames := make([]string, len(newTables))
 		i := 0
-		for t := range tables {
+		for t := range newTables {
 			tableName, schemaName := getTableSchema(t)
 			tableNames[i] = fmt.Sprintf("%s.%s", schemaName, tableName)
 			i++
@@ -126,6 +126,12 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za
 		tableList := strings.Join(tableNames, ", ")
 
 		currentTables, exists := currentPublications[slotName]
+		// if newTables is empty, its definition was removed from the streams section;
+		// when the slot itself is still defined in the manifest we must sync publications, too,
+		// and by reusing the current tables we make sure the publication is left unchanged
+		if len(newTables) == 0 {
+			tableList = currentTables
+		}
 		if !exists {
 			createPublications[slotName] = tableList
 		} else if currentTables != tableList {
@@ -350,16 +356,8 @@ func (c *Cluster) syncStreams() error {
 		return nil
 	}
 
-	databaseSlots := make(map[string]map[string]zalandov1.Slot)
-	slotsToSync := make(map[string]map[string]string)
-	requiredPatroniConfig := c.Spec.Patroni
-
-	if len(requiredPatroniConfig.Slots) > 0 {
-		for slotName, slotConfig := range requiredPatroniConfig.Slots {
-			slotsToSync[slotName] = slotConfig
-		}
-	}
-
+	// create a map with every database and an empty slot definition
+	// we need it to detect removal of streams from databases
 	if err := c.initDbConn(); err != nil {
 		return fmt.Errorf("could not init database connection")
 	}
@@ -372,13 +370,28 @@ func (c *Cluster) syncStreams() error {
 	if err != nil {
 		return fmt.Errorf("could not get list of databases: %v", err)
 	}
-	// get database name with empty list of slot, except template0 and template1
+	databaseSlots := make(map[string]map[string]zalandov1.Slot)
 	for dbName := range listDatabases {
 		if dbName != "template0" && dbName != "template1" {
 			databaseSlots[dbName] = map[string]zalandov1.Slot{}
 		}
 	}
 
+	// explicitly defined slots must also be taken into account when syncing the Patroni config
+	slotsToSync := make(map[string]map[string]string)
+	requiredPatroniConfig := c.Spec.Patroni
+	if len(requiredPatroniConfig.Slots) > 0 {
+		for slotName, slotConfig := range requiredPatroniConfig.Slots {
+			slotsToSync[slotName] = slotConfig
+			if _, exists := databaseSlots[slotConfig["database"]]; exists {
+				databaseSlots[slotConfig["database"]][slotName] = zalandov1.Slot{
+					Slot:        slotConfig,
+					Publication: make(map[string]acidv1.StreamTable),
+				}
+			}
+		}
+	}
+
 	// get list of required slots and publications, group by database
 	for _, stream := range c.Spec.Streams {
 		if _, exists := databaseSlots[stream.Database]; !exists {
@@ -391,13 +404,13 @@
 			"type": "logical",
 		}
 		slotName := getSlotName(stream.Database, stream.ApplicationId)
-		if _, exists := databaseSlots[stream.Database][slotName]; !exists {
+		slotAndPublication, exists := databaseSlots[stream.Database][slotName]
+		if !exists {
 			databaseSlots[stream.Database][slotName] = zalandov1.Slot{
 				Slot:        slot,
 				Publication: stream.Tables,
 			}
 		} else {
-			slotAndPublication := databaseSlots[stream.Database][slotName]
 			streamTables := slotAndPublication.Publication
 			for tableName, table := range stream.Tables {
 				if _, exists := streamTables[tableName]; !exists {
@@ -492,16 +505,17 @@ func (c *Cluster) syncStream(appId string) error {
 			continue
 		}
 		streamExists = true
+		c.Streams[appId] = &stream
 		desiredStreams := c.generateFabricEventStream(appId)
 		if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) {
 			c.logger.Infof("owner references of event streams with applicationId %s do not match the current ones", appId)
 			stream.ObjectMeta.OwnerReferences = desiredStreams.ObjectMeta.OwnerReferences
 			c.setProcessName("updating event streams with applicationId %s", appId)
-			stream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{})
+			updatedStream, err := c.KubeClient.FabricEventStreams(stream.Namespace).Update(context.TODO(), &stream, metav1.UpdateOptions{})
 			if err != nil {
 				return fmt.Errorf("could not update event streams with applicationId %s: %v", appId, err)
 			}
-			c.Streams[appId] = stream
+			c.Streams[appId] = updatedStream
 		}
 		if match, reason := c.compareStreams(&stream, desiredStreams); !match {
 			c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason)
@@ -545,7 +559,7 @@ func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.Fab
 	for newKey, newValue := range newEventStreams.Annotations {
 		desiredAnnotations[newKey] = newValue
 	}
-	if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed {
+	if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations, nil); changed {
 		match = false
 		reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason))
 	}
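The grouping above is the subtle part of stream sync: every user database starts out with an empty slot map, manifest-defined Patroni slots are folded in with an empty publication, and each stream definition then either creates its slot entry or merges additional tables into an existing one. A compact, runnable sketch of that merge step, with plain strings standing in for the operator's StreamTable type:

	package main

	import "fmt"

	// mergeTables mirrors the per-slot merge in syncStreams: tables from another
	// stream definition with the same applicationId are added only if the
	// publication does not contain them yet.
	func mergeTables(publication, streamTables map[string]string) {
		for name, table := range streamTables {
			if _, exists := publication[name]; !exists {
				publication[name] = table
			}
		}
	}

	func main() {
		// hypothetical slot for database "foo" with one registered table
		publication := map[string]string{"public.a": "id_a"}
		// a second stream for the same applicationId brings an overlapping table set
		mergeTables(publication, map[string]string{"public.a": "ignored", "public.b": "id_b"})
		fmt.Println(len(publication), publication["public.a"], publication["public.b"]) // 2 id_a id_b
	}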
diff --git a/pkg/cluster/sync.go b/pkg/cluster/sync.go
index d1a339001..ecf692702 100644
--- a/pkg/cluster/sync.go
+++ b/pkg/cluster/sync.go
@@ -4,8 +4,10 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"maps"
 	"reflect"
 	"regexp"
+	"slices"
 	"strconv"
 	"strings"
 	"time"
@@ -15,8 +17,6 @@ import (
 	"github.com/zalando/postgres-operator/pkg/util"
 	"github.com/zalando/postgres-operator/pkg/util/constants"
 	"github.com/zalando/postgres-operator/pkg/util/k8sutil"
-	"golang.org/x/exp/maps"
-	"golang.org/x/exp/slices"
 	batchv1 "k8s.io/api/batch/v1"
 	v1 "k8s.io/api/core/v1"
 	policyv1 "k8s.io/api/policy/v1"
@@ -97,6 +97,11 @@ func (c *Cluster) Sync(newSpec *acidv1.Postgresql) error {
 		}
 	}
 
+	if !isInMaintenanceWindow(newSpec.Spec.MaintenanceWindows) {
+		// do not apply any major version related changes yet
+		newSpec.Spec.PostgresqlParam.PgVersion = oldSpec.Spec.PostgresqlParam.PgVersion
+	}
+
 	if err = c.syncStatefulSet(); err != nil {
 		if !k8sutil.ResourceAlreadyExists(err) {
 			err = fmt.Errorf("could not sync statefulsets: %v", err)
@@ -112,8 +117,8 @@
 	}
 
 	c.logger.Debug("syncing pod disruption budgets")
-	if err = c.syncPodDisruptionBudget(false); err != nil {
-		err = fmt.Errorf("could not sync pod disruption budget: %v", err)
+	if err = c.syncPodDisruptionBudgets(false); err != nil {
+		err = fmt.Errorf("could not sync pod disruption budgets: %v", err)
 		return err
 	}
@@ -148,7 +153,10 @@
 		return fmt.Errorf("could not sync connection pooler: %v", err)
 	}
 
-	if len(c.Spec.Streams) > 0 {
+	// sync streams when the number of stream definitions in the manifest differs from the number of stream CRs
+	// the counts can differ legitimately, since manifest streams are grouped by applicationId,
+	// but comparing them still catches removals that were missed on update
+	if len(c.Spec.Streams) != len(c.Streams) {
 		c.logger.Debug("syncing streams")
 		if err = c.syncStreams(); err != nil {
 			err = fmt.Errorf("could not sync streams: %v", err)
@@ -230,7 +238,7 @@ func (c *Cluster) syncPatroniConfigMap(suffix string) error {
 		maps.Copy(annotations, cm.Annotations)
 		// Patroni can add extra annotations so incl.
current annotations in desired annotations desiredAnnotations := c.annotationsSet(ep.Annotations) - if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed { + if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { patchData, err := metaAnnotationsPatch(desiredAnnotations) if err != nil { return fmt.Errorf("could not form patch for %s endpoint: %v", endpointName, err) @@ -320,7 +328,7 @@ func (c *Cluster) syncPatroniService() error { maps.Copy(annotations, svc.Annotations) // Patroni can add extra annotations so incl. current annotations in desired annotations desiredAnnotations := c.annotationsSet(svc.Annotations) - if changed, _ := c.compareAnnotations(annotations, desiredAnnotations); changed { + if changed, _ := c.compareAnnotations(annotations, desiredAnnotations, nil); changed { patchData, err := metaAnnotationsPatch(desiredAnnotations) if err != nil { return fmt.Errorf("could not form patch for %s service: %v", serviceName, err) @@ -412,7 +420,7 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { return fmt.Errorf("could not update %s endpoint: %v", role, err) } } else { - if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations); changed { + if changed, _ := c.compareAnnotations(ep.Annotations, desiredEp.Annotations, nil); changed { patchData, err := metaAnnotationsPatch(desiredEp.Annotations) if err != nil { return fmt.Errorf("could not form patch for %s endpoint: %v", role, err) @@ -447,22 +455,22 @@ func (c *Cluster) syncEndpoint(role PostgresRole) error { return nil } -func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { +func (c *Cluster) syncPrimaryPodDisruptionBudget(isUpdate bool) error { var ( pdb *policyv1.PodDisruptionBudget err error ) - if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err == nil { - c.PodDisruptionBudget = pdb - newPDB := c.generatePodDisruptionBudget() + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil { + c.PrimaryPodDisruptionBudget = pdb + newPDB := c.generatePrimaryPodDisruptionBudget() match, reason := c.comparePodDisruptionBudget(pdb, newPDB) if !match { c.logPDBChanges(pdb, newPDB, isUpdate, reason) - if err = c.updatePodDisruptionBudget(newPDB); err != nil { + if err = c.updatePrimaryPodDisruptionBudget(newPDB); err != nil { return err } } else { - c.PodDisruptionBudget = pdb + c.PrimaryPodDisruptionBudget = pdb } return nil @@ -471,21 +479,74 @@ func (c *Cluster) syncPodDisruptionBudget(isUpdate bool) error { return fmt.Errorf("could not get pod disruption budget: %v", err) } // no existing pod disruption budget, create new one - c.logger.Infof("could not find the cluster's pod disruption budget") + c.logger.Infof("could not find the primary pod disruption budget") - if pdb, err = c.createPodDisruptionBudget(); err != nil { + if err = c.createPrimaryPodDisruptionBudget(); err != nil { if !k8sutil.ResourceAlreadyExists(err) { - return fmt.Errorf("could not create pod disruption budget: %v", err) + return fmt.Errorf("could not create primary pod disruption budget: %v", err) } c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) - if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.podDisruptionBudgetName(), metav1.GetOptions{}); err != nil { + if pdb, err = 
c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.PrimaryPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil { return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) } } - c.logger.Infof("created missing pod disruption budget %q", util.NameFromMeta(pdb.ObjectMeta)) - c.PodDisruptionBudget = pdb + return nil +} +func (c *Cluster) syncCriticalOpPodDisruptionBudget(isUpdate bool) error { + var ( + pdb *policyv1.PodDisruptionBudget + err error + ) + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err == nil { + c.CriticalOpPodDisruptionBudget = pdb + newPDB := c.generateCriticalOpPodDisruptionBudget() + match, reason := c.comparePodDisruptionBudget(pdb, newPDB) + if !match { + c.logPDBChanges(pdb, newPDB, isUpdate, reason) + if err = c.updateCriticalOpPodDisruptionBudget(newPDB); err != nil { + return err + } + } else { + c.CriticalOpPodDisruptionBudget = pdb + } + return nil + + } + if !k8sutil.ResourceNotFound(err) { + return fmt.Errorf("could not get pod disruption budget: %v", err) + } + // no existing pod disruption budget, create new one + c.logger.Infof("could not find pod disruption budget for critical operations") + + if err = c.createCriticalOpPodDisruptionBudget(); err != nil { + if !k8sutil.ResourceAlreadyExists(err) { + return fmt.Errorf("could not create pod disruption budget for critical operations: %v", err) + } + c.logger.Infof("pod disruption budget %q already exists", util.NameFromMeta(pdb.ObjectMeta)) + if pdb, err = c.KubeClient.PodDisruptionBudgets(c.Namespace).Get(context.TODO(), c.criticalOpPodDisruptionBudgetName(), metav1.GetOptions{}); err != nil { + return fmt.Errorf("could not fetch existing %q pod disruption budget", util.NameFromMeta(pdb.ObjectMeta)) + } + } + + return nil +} + +func (c *Cluster) syncPodDisruptionBudgets(isUpdate bool) error { + errors := make([]string, 0) + + if err := c.syncPrimaryPodDisruptionBudget(isUpdate); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + + if err := c.syncCriticalOpPodDisruptionBudget(isUpdate); err != nil { + errors = append(errors, fmt.Sprintf("%v", err)) + } + + if len(errors) > 0 { + return fmt.Errorf("%v", strings.Join(errors, `', '`)) + } return nil } @@ -497,6 +558,7 @@ func (c *Cluster) syncStatefulSet() error { ) podsToRecreate := make([]v1.Pod, 0) isSafeToRecreatePods := true + postponeReasons := make([]string, 0) switchoverCandidates := make([]spec.NamespacedName, 0) pods, err := c.listPods() @@ -561,13 +623,22 @@ func (c *Cluster) syncStatefulSet() error { cmp := c.compareStatefulSetWith(desiredSts) if !cmp.rollingUpdate { + updatedPodAnnotations := map[string]*string{} + for _, anno := range cmp.deletedPodAnnotations { + updatedPodAnnotations[anno] = nil + } + for anno, val := range desiredSts.Spec.Template.Annotations { + updatedPodAnnotations[anno] = &val + } + metadataReq := map[string]map[string]map[string]*string{"metadata": {"annotations": updatedPodAnnotations}} + patch, err := json.Marshal(metadataReq) + if err != nil { + return fmt.Errorf("could not form patch for pod annotations: %v", err) + } + for _, pod := range pods { - if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations); changed { - patchData, err := metaAnnotationsPatch(desiredSts.Spec.Template.Annotations) - if err != nil { - return fmt.Errorf("could not form patch for pod %q annotations: %v", pod.Name, err) - } - 
_, err = c.KubeClient.Pods(pod.Namespace).Patch(context.TODO(), pod.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) + if changed, _ := c.compareAnnotations(pod.Annotations, desiredSts.Spec.Template.Annotations, nil); changed { + _, err = c.KubeClient.Pods(c.Namespace).Patch(context.TODO(), pod.Name, types.StrategicMergePatchType, patch, metav1.PatchOptions{}) if err != nil { return fmt.Errorf("could not patch annotations for pod %q: %v", pod.Name, err) } @@ -646,12 +717,14 @@ func (c *Cluster) syncStatefulSet() error { c.logger.Debug("syncing Patroni config") if configPatched, restartPrimaryFirst, restartWait, err = c.syncPatroniConfig(pods, c.Spec.Patroni, requiredPgParameters); err != nil { c.logger.Warningf("Patroni config updated? %v - errors during config sync: %v", configPatched, err) + postponeReasons = append(postponeReasons, "errors during Patroni config sync") isSafeToRecreatePods = false } // restart Postgres where it is still pending if err = c.restartInstances(pods, restartWait, restartPrimaryFirst); err != nil { c.logger.Errorf("errors while restarting Postgres in pods via Patroni API: %v", err) + postponeReasons = append(postponeReasons, "errors while restarting Postgres via Patroni API") isSafeToRecreatePods = false } @@ -666,7 +739,7 @@ func (c *Cluster) syncStatefulSet() error { } c.eventRecorder.Event(c.GetReference(), v1.EventTypeNormal, "Update", "Rolling update done - pods have been recreated") } else { - c.logger.Warningf("postpone pod recreation until next sync because of errors during config sync") + c.logger.Warningf("postpone pod recreation until next sync - reason: %s", strings.Join(postponeReasons, `', '`)) } } @@ -986,40 +1059,45 @@ func (c *Cluster) syncStandbyClusterConfiguration() error { func (c *Cluster) syncSecrets() error { c.logger.Debug("syncing secrets") c.setProcessName("syncing secrets") + errors := make([]string, 0) generatedSecrets := c.generateUserSecrets() retentionUsers := make([]string, 0) currentTime := time.Now() for secretUsername, generatedSecret := range generatedSecrets { - secret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{}) + pgUserDegraded := false + createdSecret, err := c.KubeClient.Secrets(generatedSecret.Namespace).Create(context.TODO(), generatedSecret, metav1.CreateOptions{}) if err == nil { - c.Secrets[secret.UID] = secret - c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(secret.ObjectMeta), generatedSecret.Namespace, secret.UID) + c.Secrets[createdSecret.UID] = createdSecret + c.logger.Infof("created new secret %s, namespace: %s, uid: %s", util.NameFromMeta(createdSecret.ObjectMeta), generatedSecret.Namespace, createdSecret.UID) continue } if k8sutil.ResourceAlreadyExists(err) { - if err = c.updateSecret(secretUsername, generatedSecret, &retentionUsers, currentTime); err != nil { - c.logger.Warningf("syncing secret %s failed: %v", util.NameFromMeta(secret.ObjectMeta), err) + updatedSecret, err := c.updateSecret(secretUsername, generatedSecret, &retentionUsers, currentTime) + if err == nil { + c.Secrets[updatedSecret.UID] = updatedSecret + continue } + errors = append(errors, fmt.Sprintf("syncing secret %s failed: %v", util.NameFromMeta(generatedSecret.ObjectMeta), err)) + pgUserDegraded = true } else { - return fmt.Errorf("could not create secret for user %s: in namespace %s: %v", secretUsername, generatedSecret.Namespace, err) + errors = append(errors, fmt.Sprintf("could not create secret for user 
%s in namespace %s: %v", secretUsername, generatedSecret.Namespace, err))
+			pgUserDegraded = true
 		}
+		c.updatePgUser(secretUsername, pgUserDegraded)
 	}
 
 	// remove rotation users that exceed the retention interval
 	if len(retentionUsers) > 0 {
-		err := c.initDbConn()
-		if err != nil {
-			return fmt.Errorf("could not init db connection: %v", err)
-		}
-		if err = c.cleanupRotatedUsers(retentionUsers, c.pgDb); err != nil {
-			return fmt.Errorf("error removing users exceeding configured retention interval: %v", err)
-		}
-		if err := c.closeDbConn(); err != nil {
-			c.logger.Errorf("could not close database connection after removing users exceeding configured retention interval: %v", err)
+		if err := c.cleanupRotatedUsers(retentionUsers); err != nil {
+			errors = append(errors, fmt.Sprintf("error removing users exceeding configured retention interval: %v", err))
 		}
 	}
 
+	if len(errors) > 0 {
+		return fmt.Errorf("%v", strings.Join(errors, `', '`))
+	}
+
 	return nil
 }
 
@@ -1032,7 +1110,7 @@ func (c *Cluster) updateSecret(
 	secretUsername string,
 	generatedSecret *v1.Secret,
 	retentionUsers *[]string,
-	currentTime time.Time) error {
+	currentTime time.Time) (*v1.Secret, error) {
 	var (
 		secret *v1.Secret
 		err    error
@@ -1042,20 +1120,21 @@ func (c *Cluster) updateSecret(
 	// get the secret first
 	if secret, err =
c.KubeClient.Secrets(secret.Namespace).Update(context.TODO(), secret, metav1.UpdateOptions{}); err != nil { - return fmt.Errorf("could not update secret %s: %v", secretName, err) + return nil, fmt.Errorf("could not update secret: %v", err) } - c.Secrets[secret.UID] = secret } - if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations); changed { + if changed, _ := c.compareAnnotations(secret.Annotations, generatedSecret.Annotations, nil); changed { patchData, err := metaAnnotationsPatch(generatedSecret.Annotations) if err != nil { - return fmt.Errorf("could not form patch for secret %q annotations: %v", secret.Name, err) + return nil, fmt.Errorf("could not form patch for secret annotations: %v", err) } secret, err = c.KubeClient.Secrets(secret.Namespace).Patch(context.TODO(), secret.Name, types.MergePatchType, []byte(patchData), metav1.PatchOptions{}) if err != nil { - return fmt.Errorf("could not patch annotations for secret %q: %v", secret.Name, err) + return nil, fmt.Errorf("could not patch annotations for secret: %v", err) } - c.Secrets[secret.UID] = secret } - return nil + return secret, nil } func (c *Cluster) rotatePasswordInSecret( @@ -1260,6 +1342,23 @@ func (c *Cluster) rotatePasswordInSecret( return updateSecretMsg, nil } +func (c *Cluster) updatePgUser(secretUsername string, degraded bool) { + for key, pgUser := range c.pgUsers { + if pgUser.Name == secretUsername { + pgUser.Degraded = degraded + c.pgUsers[key] = pgUser + return + } + } + for key, pgUser := range c.systemUsers { + if pgUser.Name == secretUsername { + pgUser.Degraded = degraded + c.systemUsers[key] = pgUser + return + } + } +} + func (c *Cluster) syncRoles() (err error) { c.setProcessName("syncing roles") @@ -1587,19 +1686,38 @@ func (c *Cluster) syncLogicalBackupJob() error { } c.logger.Infof("logical backup job %s updated", c.getLogicalBackupJobName()) } - if match, reason := c.compareLogicalBackupJob(job, desiredJob); !match { + if cmp := c.compareLogicalBackupJob(job, desiredJob); !cmp.match { c.logger.Infof("logical job %s is not in the desired state and needs to be updated", c.getLogicalBackupJobName(), ) - if reason != "" { - c.logger.Infof("reason: %s", reason) + if len(cmp.reasons) != 0 { + for _, reason := range cmp.reasons { + c.logger.Infof("reason: %s", reason) + } + } + if len(cmp.deletedPodAnnotations) != 0 { + templateMetadataReq := map[string]map[string]map[string]map[string]map[string]map[string]map[string]*string{ + "spec": {"jobTemplate": {"spec": {"template": {"metadata": {"annotations": {}}}}}}} + for _, anno := range cmp.deletedPodAnnotations { + templateMetadataReq["spec"]["jobTemplate"]["spec"]["template"]["metadata"]["annotations"][anno] = nil + } + patch, err := json.Marshal(templateMetadataReq) + if err != nil { + return fmt.Errorf("could not marshal ObjectMeta for logical backup job %q pod template: %v", jobName, err) + } + + job, err = c.KubeClient.CronJobs(c.Namespace).Patch(context.TODO(), jobName, types.StrategicMergePatchType, patch, metav1.PatchOptions{}, "") + if err != nil { + c.logger.Errorf("failed to remove annotations from the logical backup job %q pod template: %v", jobName, err) + return err + } } if err = c.patchLogicalBackupJob(desiredJob); err != nil { return fmt.Errorf("could not update logical backup job to match desired state: %v", err) } c.logger.Info("the logical backup job is synced") } - if changed, _ := c.compareAnnotations(job.Annotations, desiredJob.Annotations); changed { + if changed, _ := c.compareAnnotations(job.Annotations, 
desiredJob.Annotations, nil); changed {
 		patchData, err := metaAnnotationsPatch(desiredJob.Annotations)
 		if err != nil {
 			return fmt.Errorf("could not form patch for the logical backup job %q: %v", jobName, err)
diff --git a/pkg/cluster/sync_test.go b/pkg/cluster/sync_test.go
index d45a193cb..87e9dc8a5 100644
--- a/pkg/cluster/sync_test.go
+++ b/pkg/cluster/sync_test.go
@@ -2,20 +2,22 @@ package cluster

 import (
 	"bytes"
+	"context"
 	"fmt"
 	"io"
 	"net/http"
+	"slices"
 	"testing"
 	"time"

-	"context"
-
-	"golang.org/x/exp/slices"
 	v1 "k8s.io/api/core/v1"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/runtime"
 	"k8s.io/apimachinery/pkg/types"
+	k8stesting "k8s.io/client-go/testing"

 	"github.com/golang/mock/gomock"
+	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"github.com/stretchr/testify/assert"
 	"github.com/zalando/postgres-operator/mocks"
@@ -51,6 +53,16 @@ func newFakeK8sSyncClient() (k8sutil.KubernetesClient, *fake.Clientset) {
 }

 func newFakeK8sSyncSecretsClient() (k8sutil.KubernetesClient, *fake.Clientset) {
+	// add a reactor that checks namespace existence before creating secrets
+	clientSet.PrependReactor("create", "secrets", func(action k8stesting.Action) (bool, runtime.Object, error) {
+		createAction := action.(k8stesting.CreateAction)
+		secret := createAction.GetObject().(*v1.Secret)
+		if secret.Namespace != "default" {
+			return true, nil, errors.New("namespace does not exist")
+		}
+		return false, nil, nil
+	})
+
 	return k8sutil.KubernetesClient{
 		SecretsGetter: clientSet.CoreV1(),
 	}, clientSet
@@ -142,6 +154,181 @@ func TestSyncStatefulSetsAnnotations(t *testing.T) {
 	}
 }

+func TestPodAnnotationsSync(t *testing.T) {
+	clusterName := "acid-test-cluster-2"
+	namespace := "default"
+	podAnnotation := "no-scale-down"
+	podAnnotations := map[string]string{podAnnotation: "true"}
+	customPodAnnotation := "foo"
+	customPodAnnotations := map[string]string{customPodAnnotation: "true"}
+
+	ctrl := gomock.NewController(t)
+	defer ctrl.Finish()
+	mockClient := mocks.NewMockHTTPClient(ctrl)
+	client, _ := newFakeK8sAnnotationsClient()
+
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
+			Volume: acidv1.Volume{
+				Size: "1Gi",
+			},
+			EnableConnectionPooler:        boolToPointer(true),
+			EnableLogicalBackup:           true,
+			EnableReplicaConnectionPooler: boolToPointer(true),
+			PodAnnotations:                podAnnotations,
+			NumberOfInstances:             2,
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				PatroniAPICheckInterval: time.Duration(1),
+				PatroniAPICheckTimeout:  time.Duration(5),
+				PodManagementPolicy:     "ordered_ready",
+				CustomPodAnnotations:    customPodAnnotations,
+				ConnectionPooler: config.ConnectionPooler{
+					ConnectionPoolerDefaultCPURequest:    "100m",
+					ConnectionPoolerDefaultCPULimit:      "100m",
+					ConnectionPoolerDefaultMemoryRequest: "100Mi",
+					ConnectionPoolerDefaultMemoryLimit:   "100Mi",
+					NumberOfInstances:                    k8sutil.Int32ToPointer(1),
+				},
+				Resources: config.Resources{
+					ClusterLabels:         map[string]string{"application": "spilo"},
+					ClusterNameLabel:      "cluster-name",
+					DefaultCPURequest:     "300m",
+					DefaultCPULimit:       "300m",
+					DefaultMemoryRequest:  "300Mi",
+					DefaultMemoryLimit:    "300Mi",
+					MaxInstances:          -1,
+					PodRoleLabel:          "spilo-role",
+					ResourceCheckInterval: time.Duration(3),
+					ResourceCheckTimeout:  time.Duration(10),
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	configJson := `{"postgresql": {"parameters": {"log_min_duration_statement": 200, "max_connections": 50}}, "ttl": 20}`
+	response := http.Response{
+		StatusCode: 200,
+		Body:       io.NopCloser(bytes.NewReader([]byte(configJson))),
+	}
+
+	mockClient.EXPECT().Do(gomock.Any()).Return(&response, nil).AnyTimes()
+	cluster.patroni = patroni.New(patroniLogger, mockClient)
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+	clusterOptions := clusterLabelsOptions(cluster)
+
+	// create a statefulset
+	_, err := cluster.createStatefulSet()
+	assert.NoError(t, err)
+	// create pods
+	podsList := createPods(cluster)
+	for _, pod := range podsList {
+		_, err = cluster.KubeClient.Pods(namespace).Create(context.TODO(), &pod, metav1.CreateOptions{})
+		assert.NoError(t, err)
+	}
+	// create connection pooler
+	_, err = cluster.createConnectionPooler(mockInstallLookupFunction)
+	assert.NoError(t, err)
+
+	// create cron job
+	err = cluster.createLogicalBackupJob()
+	assert.NoError(t, err)
+
+	annotateResources(cluster)
+	err = cluster.Sync(&cluster.Postgresql)
+	assert.NoError(t, err)
+
+	// 1. PodAnnotations set
+	stsList, err := cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, sts := range stsList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, sts.Spec.Template.Annotations, annotation)
+		}
+	}
+
+	for _, role := range []PostgresRole{Master, Replica} {
+		deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
+		assert.NoError(t, err)
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, deploy.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("pooler deployment pod template %s should contain annotation %s, found %#v",
+					deploy.Name, annotation, deploy.Spec.Template.Annotations))
+		}
+	}
+
+	podList, err := cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, pod := range podList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, pod.Annotations, annotation,
+				fmt.Sprintf("pod %s should contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations))
+		}
+	}
+
+	cronJobList, err := cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, cronJob := range cronJobList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.Contains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("logical backup cron job's pod template should contain annotation %s, found %#v",
+					annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations))
+		}
+	}
+
+	// 2. PodAnnotations removed
+	newSpec := cluster.Postgresql.DeepCopy()
+	newSpec.Spec.PodAnnotations = nil
+	cluster.OpConfig.CustomPodAnnotations = nil
+	err = cluster.Sync(newSpec)
+	assert.NoError(t, err)
+
+	stsList, err = cluster.KubeClient.StatefulSets(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, sts := range stsList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, sts.Spec.Template.Annotations, annotation)
+		}
+	}
+
+	for _, role := range []PostgresRole{Master, Replica} {
+		deploy, err := cluster.KubeClient.Deployments(namespace).Get(context.TODO(), cluster.connectionPoolerName(role), metav1.GetOptions{})
+		assert.NoError(t, err)
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, deploy.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("pooler deployment pod template %s should not contain annotation %s, found %#v",
+					deploy.Name, annotation, deploy.Spec.Template.Annotations))
+		}
+	}
+
+	podList, err = cluster.KubeClient.Pods(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, pod := range podList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, pod.Annotations, annotation,
+				fmt.Sprintf("pod %s should not contain annotation %s, found %#v", pod.Name, annotation, pod.Annotations))
+		}
+	}
+
+	cronJobList, err = cluster.KubeClient.CronJobs(namespace).List(context.TODO(), clusterOptions)
+	assert.NoError(t, err)
+	for _, cronJob := range cronJobList.Items {
+		for _, annotation := range []string{podAnnotation, customPodAnnotation} {
+			assert.NotContains(t, cronJob.Spec.JobTemplate.Spec.Template.Annotations, annotation,
+				fmt.Sprintf("logical backup cron job's pod template should not contain annotation %s, found %#v",
+					annotation, cronJob.Spec.JobTemplate.Spec.Template.Annotations))
+		}
+	}
+}
+
 func TestCheckAndSetGlobalPostgreSQLConfiguration(t *testing.T) {
 	testName := "test config comparison"
 	client, _ := newFakeK8sSyncClient()
@@ -636,7 +823,7 @@ func TestUpdateSecret(t *testing.T) {
 		},
 		Spec: acidv1.PostgresSpec{
 			Databases: map[string]string{dbname: dbowner},
-			Users:     map[string]acidv1.UserFlags{appUser: {}, "bar": {}, dbowner: {}},
+			Users:     map[string]acidv1.UserFlags{appUser: {}, "bar": {}, dbowner: {}, "not-exist.test_user": {}},
 			UsersIgnoringSecretRotation:    []string{"bar"},
 			UsersWithInPlaceSecretRotation: []string{dbowner},
 			Streams: []acidv1.Stream{
@@ -668,6 +855,7 @@ func TestUpdateSecret(t *testing.T) {
 					PasswordRotationInterval:      1,
 					PasswordRotationUserRetention: 3,
 				},
+				EnableCrossNamespaceSecret: true,
 				Resources: config.Resources{
 					ClusterLabels:    map[string]string{"application": "spilo"},
 					ClusterNameLabel: "cluster-name",
@@ -690,7 +878,9 @@ func TestUpdateSecret(t *testing.T) {

 	allUsers := make(map[string]spec.PgUser)
 	for _, pgUser := range cluster.pgUsers {
-		allUsers[pgUser.Name] = pgUser
+		if !pgUser.Degraded {
+			allUsers[pgUser.Name] = pgUser
+		}
 	}
 	for _, systemUser := range cluster.systemUsers {
 		allUsers[systemUser.Name] = systemUser
@@ -774,3 +964,57 @@ func TestUpdateSecret(t *testing.T) {
 		t.Errorf("%s: updated secret does not contain expected username: expected %s, got %s", testName, appUser, currentUsername)
 	}
 }
+
+func TestUpdateSecretNameConflict(t *testing.T) {
+	client, _ := newFakeK8sSyncSecretsClient()
+
+	clusterName := "acid-test-cluster"
+	namespace := "default"
+	secretTemplate := config.StringTemplate("{username}.{cluster}.credentials")
+
+	// define a manifest user that has the same name as a prepared database owner user except for dashes vs underscores;
+	// the operator cannot create secrets for both because underscores are not allowed in k8s secret names
+	pg := acidv1.Postgresql{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      clusterName,
+			Namespace: namespace,
+		},
+		Spec: acidv1.PostgresSpec{
+			PreparedDatabases: map[string]acidv1.PreparedDatabase{"prepared": {DefaultUsers: true}},
+			Users:             map[string]acidv1.UserFlags{"prepared-owner-user": {}},
+			Volume: acidv1.Volume{
+				Size: "1Gi",
+			},
+		},
+	}
+
+	var cluster = New(
+		Config{
+			OpConfig: config.Config{
+				Auth: config.Auth{
+					SuperUsername:       "postgres",
+					ReplicationUsername: "standby",
+					SecretNameTemplate:  secretTemplate,
+				},
+				Resources: config.Resources{
+					ClusterLabels:    map[string]string{"application": "spilo"},
+					ClusterNameLabel: "cluster-name",
+				},
+			},
+		}, client, pg, logger, eventRecorder)
+
+	cluster.Name = clusterName
+	cluster.Namespace = namespace
+	cluster.pgUsers = map[string]spec.PgUser{}
+
+	// init all users
+	cluster.initUsers()
+	// create secrets and fail because of the user name mismatch:
+	// prepared-owner-user from the manifest vs prepared_owner_user from the prepared database
+	err := cluster.syncSecrets()
+	assert.Error(t, err)
+
+	// the order of secrets to sync is not deterministic, check only the first part of the error message
+	expectedError := fmt.Sprintf("syncing secret %s failed: could not update secret because of user name mismatch", "default/prepared-owner-user.acid-test-cluster.credentials")
+	assert.Contains(t, err.Error(), expectedError)
+}
diff --git a/pkg/cluster/types.go b/pkg/cluster/types.go
index 8e9263d49..17c4e705e 100644
--- a/pkg/cluster/types.go
+++ b/pkg/cluster/types.go
@@ -58,15 +58,16 @@ type WorkerStatus struct {

 // ClusterStatus describes status of the cluster
 type ClusterStatus struct {
-	Team                string
-	Cluster             string
-	Namespace           string
-	MasterService       *v1.Service
-	ReplicaService      *v1.Service
-	MasterEndpoint      *v1.Endpoints
-	ReplicaEndpoint     *v1.Endpoints
-	StatefulSet         *appsv1.StatefulSet
-	PodDisruptionBudget *policyv1.PodDisruptionBudget
+	Team                          string
+	Cluster                       string
+	Namespace                     string
+	MasterService                 *v1.Service
+	ReplicaService                *v1.Service
+	MasterEndpoint                *v1.Endpoints
+	ReplicaEndpoint               *v1.Endpoints
+	StatefulSet                   *appsv1.StatefulSet
+	PrimaryPodDisruptionBudget    *policyv1.PodDisruptionBudget
+	CriticalOpPodDisruptionBudget *policyv1.PodDisruptionBudget

 	CurrentProcess Process
 	Worker         uint32
diff --git a/pkg/cluster/util.go b/pkg/cluster/util.go
index c570fcc3a..06a35f1b7 100644
--- a/pkg/cluster/util.go
+++ b/pkg/cluster/util.go
@@ -257,9 +257,9 @@ func (c *Cluster) getTeamMembers(teamID string) ([]string, error) {
 	if teamID == "" {
 		msg := "no teamId specified"
 		if c.OpConfig.EnableTeamIdClusternamePrefix {
-			return nil, fmt.Errorf(msg)
+			return nil, fmt.Errorf("%s", msg)
 		}
-		c.logger.Warnf(msg)
+		c.logger.Warnf("%s", msg)
 		return nil, nil
 	}

@@ -663,7 +663,7 @@ func parseResourceRequirements(resourcesRequirement v1.ResourceRequirements) (ac
 	return resources, nil
 }

-func isInMainternanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
+func isInMaintenanceWindow(specMaintenanceWindows []acidv1.MaintenanceWindow) bool {
 	if len(specMaintenanceWindows) == 0 {
 		return true
 	}
diff --git a/pkg/cluster/util_test.go b/pkg/cluster/util_test.go
index 2cb755c6c..9cd7dc7e9 100644
--- a/pkg/cluster/util_test.go
+++ b/pkg/cluster/util_test.go
@@ -247,18 +247,18 @@ func createPods(cluster *Cluster) []v1.Pod {
 	for i, role := range []PostgresRole{Master, Replica} {
 		podsList = append(podsList, v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      fmt.Sprintf("%s-%d", clusterName, i),
+				Name:      fmt.Sprintf("%s-%d", cluster.Name, i),
 				Namespace: namespace,
 				Labels: map[string]string{
 					"application":  "spilo",
-					"cluster-name": clusterName,
+					"cluster-name": cluster.Name,
 					"spilo-role":   string(role),
 				},
 			},
 		})
 		podsList = append(podsList, v1.Pod{
 			ObjectMeta: metav1.ObjectMeta{
-				Name:      fmt.Sprintf("%s-pooler-%s", clusterName, role),
+				Name:      fmt.Sprintf("%s-pooler-%s", cluster.Name, role),
 				Namespace: namespace,
 				Labels:    cluster.connectionPoolerLabels(role, true).MatchLabels,
 			},
@@ -329,7 +329,7 @@ func newInheritedAnnotationsCluster(client k8sutil.KubernetesClient) (*Cluster,
 	if err != nil {
 		return nil, err
 	}
-	_, err = cluster.createPodDisruptionBudget()
+	err = cluster.createPodDisruptionBudgets()
 	if err != nil {
 		return nil, err
 	}
@@ -705,8 +705,8 @@ func TestIsInMaintenanceWindow(t *testing.T) {
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			cluster.Spec.MaintenanceWindows = tt.windows
-			if isInMainternanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
-				t.Errorf("Expected isInMainternanceWindow to return %t", tt.expected)
+			if isInMaintenanceWindow(cluster.Spec.MaintenanceWindows) != tt.expected {
+				t.Errorf("Expected isInMaintenanceWindow to return %t", tt.expected)
 			}
 		})
 	}
diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go
index 240220ccf..7aa70a5d1 100644
--- a/pkg/cluster/volumes.go
+++ b/pkg/cluster/volumes.go
@@ -129,7 +129,7 @@ func (c *Cluster) syncUnderlyingEBSVolume() error {

 	if len(errors) > 0 {
 		for _, s := range errors {
-			c.logger.Warningf(s)
+			c.logger.Warningf("%s", s)
 		}
 	}
 	return nil
@@ -225,7 +225,7 @@ func (c *Cluster) syncVolumeClaims() error {
 		}

 		newAnnotations := c.annotationsSet(nil)
-		if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations); changed {
+		if changed, _ := c.compareAnnotations(pvc.Annotations, newAnnotations, nil); changed {
 			patchData, err := metaAnnotationsPatch(newAnnotations)
 			if err != nil {
 				return fmt.Errorf("could not form patch for the persistent volume claim for volume %q: %v", pvc.Name, err)
diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go
index 5739f6314..4650fe8d7 100644
--- a/pkg/controller/operator_config.go
+++ b/pkg/controller/operator_config.go
@@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur
 	result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix
 	result.EtcdHost = fromCRD.EtcdHost
 	result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps
-	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2")
+	result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p3")
 	result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8)
 	result.MinInstances = fromCRD.MinInstances
 	result.MaxInstances = fromCRD.MaxInstances
@@ -180,7 +180,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur

 	// logical backup config
 	result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *")
-	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0")
+	result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1")
 	result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3")
 	result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName
 	result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey
diff --git a/pkg/controller/postgresql.go b/pkg/controller/postgresql.go
index 42d96278c..824a030f4 100644
--- a/pkg/controller/postgresql.go
+++ b/pkg/controller/postgresql.go
@@ -597,7 +597,7 @@ func (c *Controller) createPodServiceAccount(namespace string) error {
 	_, err := c.KubeClient.ServiceAccounts(namespace).Get(context.TODO(), podServiceAccountName, metav1.GetOptions{})
 	if k8sutil.ResourceNotFound(err) {

-		c.logger.Infof(fmt.Sprintf("creating pod service account %q in the %q namespace", podServiceAccountName, namespace))
+		c.logger.Infof("creating pod service account %q in the %q namespace", podServiceAccountName, namespace)

 		// get a separate copy of service account
 		// to prevent a race condition when setting a namespace for many clusters
diff --git a/pkg/controller/util.go b/pkg/controller/util.go
index 59e608ad0..df043dfd9 100644
--- a/pkg/controller/util.go
+++ b/pkg/controller/util.go
@@ -248,7 +248,7 @@ func (c *Controller) getInfrastructureRoles(
 	}

 	if len(errors) > 0 {
-		return uniqRoles, fmt.Errorf(strings.Join(errors, `', '`))
+		return uniqRoles, fmt.Errorf("%s", strings.Join(errors, `', '`))
 	}

 	return uniqRoles, nil
diff --git a/pkg/spec/types.go b/pkg/spec/types.go
index d727aee42..c08cc5c61 100644
--- a/pkg/spec/types.go
+++ b/pkg/spec/types.go
@@ -58,6 +58,7 @@ type PgUser struct {
 	IsDbOwner  bool              `yaml:"is_db_owner"`
 	Deleted    bool              `yaml:"deleted"`
 	Rotated    bool              `yaml:"rotated"`
+	Degraded   bool              `yaml:"degraded"`
 }

 func (user *PgUser) Valid() bool {
diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go
index 30b967beb..aca9754a9 100644
--- a/pkg/util/config/config.go
+++ b/pkg/util/config/config.go
@@ -127,7 +127,7 @@ type Scalyr struct {
 // LogicalBackup defines configuration for logical backup
 type LogicalBackup struct {
 	LogicalBackupSchedule                 string `name:"logical_backup_schedule" default:"30 00 * * *"`
-	LogicalBackupDockerImage              string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"`
+	LogicalBackupDockerImage              string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.15.1"`
 	LogicalBackupProvider                 string `name:"logical_backup_provider" default:"s3"`
 	LogicalBackupAzureStorageAccountName  string `name:"logical_backup_azure_storage_account_name" default:""`
 	LogicalBackupAzureStorageContainer    string `name:"logical_backup_azure_storage_container" default:""`
@@ -175,7 +175,7 @@ type Config struct {
 	WatchedNamespace        string            `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to'
 	KubernetesUseConfigMaps bool              `name:"kubernetes_use_configmaps" default:"false"`
 	EtcdHost                string            `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS
-	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p2"`
+	DockerImage             string            `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p3"`
 	SidecarImages           map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers
 	SidecarContainers       []v1.Container    `name:"sidecars"`
 	PodServiceAccountName   string            `name:"pod_service_account_name" default:"postgres-pod"`
diff --git a/pkg/util/patroni/patroni.go b/pkg/util/patroni/patroni.go
index 4d580f1c2..2129f1acc 100644
--- a/pkg/util/patroni/patroni.go
+++ b/pkg/util/patroni/patroni.go
@@ -20,19 +20,19 @@ import (
 )

 const (
-	failoverPath = "/failover"
-	configPath   = "/config"
-	clusterPath  = "/cluster"
-	statusPath   = "/patroni"
-	restartPath  = "/restart"
-	ApiPort      = 8008
-	timeout      = 30 * time.Second
+	switchoverPath = "/switchover"
+	configPath     = "/config"
+	clusterPath    = "/cluster"
+	statusPath     = "/patroni"
+	restartPath    = "/restart"
+	ApiPort        = 8008
+	timeout        = 30 * time.Second
 )

 // Interface describe patroni methods
 type Interface interface {
 	GetClusterMembers(master *v1.Pod) ([]ClusterMember, error)
-	Switchover(master *v1.Pod, candidate string) error
+	Switchover(master *v1.Pod, candidate string, scheduled_at string) error
 	SetPostgresParameters(server *v1.Pod, options map[string]string) error
 	SetStandbyClusterParameters(server *v1.Pod, options map[string]interface{}) error
 	GetMemberData(server *v1.Pod) (MemberData, error)
@@ -103,7 +103,7 @@ func (p *Patroni) httpPostOrPatch(method string, url string, body *bytes.Buffer)
 		}
 	}()

-	if resp.StatusCode != http.StatusOK {
+	if resp.StatusCode < http.StatusOK || resp.StatusCode >= 300 {
 		bodyBytes, err := io.ReadAll(resp.Body)
 		if err != nil {
 			return fmt.Errorf("could not read response: %v", err)
@@ -128,7 +128,7 @@ func (p *Patroni) httpGet(url string) (string, error) {
 		return "", fmt.Errorf("could not read response: %v", err)
 	}

-	if response.StatusCode != http.StatusOK {
+	if response.StatusCode < http.StatusOK || response.StatusCode >= 300 {
 		return string(bodyBytes), fmt.Errorf("patroni returned '%d'", response.StatusCode)
 	}

@@ -136,9 +136,9 @@ func (p *Patroni) httpGet(url string) (string, error) {
 }

 // Switchover by calling Patroni REST API
-func (p *Patroni) Switchover(master *v1.Pod, candidate string) error {
+func (p *Patroni) Switchover(master *v1.Pod, candidate string, scheduled_at string) error {
 	buf := &bytes.Buffer{}
-	err := json.NewEncoder(buf).Encode(map[string]string{"leader": master.Name, "member": candidate})
+	err := json.NewEncoder(buf).Encode(map[string]string{"leader": master.Name, "member": candidate, "scheduled_at": scheduled_at})
 	if err != nil {
 		return fmt.Errorf("could not encode json: %v", err)
 	}
@@ -146,7 +146,7 @@ func (p *Patroni) Switchover(master *v1.Pod, candidate string) error {
 	if err != nil {
 		return err
 	}
-	return p.httpPostOrPatch(http.MethodPost, apiURLString+failoverPath, buf)
+	return p.httpPostOrPatch(http.MethodPost, apiURLString+switchoverPath, buf)
 }

 //TODO: add an option call /patroni to check if it is necessary to restart the server
diff --git a/pkg/util/users/users.go b/pkg/util/users/users.go
index 924d8390e..b3b60df04 100644
--- a/pkg/util/users/users.go
+++ b/pkg/util/users/users.go
@@ -48,6 +48,10 @@ func (strategy DefaultUserSyncStrategy) ProduceSyncRequests(dbUsers spec.PgUserM
 		if newUser.Deleted {
 			continue
 		}
+		// when the secret of the user could not be created or updated, skip any database actions
+		if newUser.Degraded {
+			continue
+		}
 		dbUser, exists := dbUsers[name]
 		if !exists {
 			reqs = append(reqs, spec.PgSyncUserRequest{Kind: spec.PGSyncUserAdd, User: newUser})
diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go
index cb8f8e97f..45850d55f 100644
--- a/pkg/util/volumes/ebs.go
+++ b/pkg/util/volumes/ebs.go
@@ -88,12 +88,13 @@ func (r *EBSVolumeResizer) DescribeVolumes(volumeIds []string) ([]VolumeProperti
 	}

 	for _, v := range volumeOutput.Volumes {
-		if *v.VolumeType == "gp3" {
+		switch *v.VolumeType {
+		case "gp3":
 			p = append(p, VolumeProperties{VolumeID: *v.VolumeId, Size: *v.Size, VolumeType: *v.VolumeType, Iops: *v.Iops, Throughput: *v.Throughput})
-		} else if *v.VolumeType == "gp2" {
+		case "gp2":
 			p = append(p, VolumeProperties{VolumeID: *v.VolumeId, Size: *v.Size, VolumeType: *v.VolumeType})
-		} else {
-			return nil, fmt.Errorf("Discovered unexpected volume type %s %s", *v.VolumeId, *v.VolumeType)
+		default:
+			return nil, fmt.Errorf("discovered unexpected volume type %s %s", *v.VolumeId, *v.VolumeType)
 		}
 	}
diff --git a/ui/Dockerfile b/ui/Dockerfile
index 51f1d7744..63170a24b 100644
--- a/ui/Dockerfile
+++ b/ui/Dockerfile
@@ -1,4 +1,4 @@
-ARG BASE_IMAGE=registry.opensource.zalan.do/library/python-3.11-slim:latest
+ARG BASE_IMAGE=python:3.11-slim
 ARG NODE_IMAGE=node:lts-alpine

 FROM $NODE_IMAGE AS build
@@ -17,7 +17,7 @@ WORKDIR /app

 RUN apt-get -qq -y update \
     # https://www.psycopg.org/docs/install.html#psycopg-vs-psycopg-binary
-    && apt-get -qq -y install --no-install-recommends g++ libpq-dev python3-dev python3-distutils \
+    && apt-get -qq -y install --no-install-recommends g++ libpq-dev python3-dev \
     && apt-get -qq -y clean \
     && rm -rf /var/lib/apt/lists/*
diff --git a/ui/Makefile b/ui/Makefile
index 8f88982ab..ff4cf94f6 100644
--- a/ui/Makefile
+++ b/ui/Makefile
@@ -1,6 +1,7 @@
 .PHONY: clean test appjs docker push mock

-IMAGE ?= registry.opensource.zalan.do/acid/postgres-operator-ui
+IMAGE ?= postgres-operator-ui
+BASE_IMAGE ?= python:3.11-slim
 VERSION ?= $(shell git describe --tags --always --dirty)
 TAG ?= $(VERSION)
 GITHEAD = $(shell git rev-parse --short HEAD)
@@ -30,10 +31,7 @@ docker: appjs
 	echo "Version ${VERSION}"
 	echo "CDP tag ${CDP_TAG}"
 	echo "git describe $(shell git describe --tags --always --dirty)"
-	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)" -f Dockerfile .
-
-push:
-	docker push "$(IMAGE):$(TAG)$(CDP_TAG)"
+	docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)" -f Dockerfile --build-arg BASE_IMAGE="${BASE_IMAGE}" .

 mock:
 	docker run -it -p 8081:8081 "$(IMAGE):$(TAG)" --mock
diff --git a/ui/app/package.json b/ui/app/package.json
index ef24834ca..7fd410bd7 100644
--- a/ui/app/package.json
+++ b/ui/app/package.json
@@ -1,6 +1,6 @@
 {
   "name": "postgres-operator-ui",
-  "version": "1.14.0",
+  "version": "1.15.1",
   "description": "PostgreSQL Operator UI",
   "main": "src/app.js",
   "config": {
@@ -38,7 +38,7 @@
     "brfs": "^2.0.2",
     "dedent-js": "1.0.1",
     "eslint": "^8.32.0",
-    "js-yaml": "4.1.0",
+    "js-yaml": "4.1.1",
     "pug": "^3.0.2",
     "rimraf": "^4.1.2",
     "riot": "^3.13.2",
diff --git a/ui/manifests/deployment.yaml b/ui/manifests/deployment.yaml
index e09dd1e4f..8c664de22 100644
--- a/ui/manifests/deployment.yaml
+++ b/ui/manifests/deployment.yaml
@@ -18,7 +18,7 @@ spec:
       serviceAccountName: postgres-operator-ui
       containers:
         - name: "service"
-          image: ghcr.io/zalando/postgres-operator-ui:v1.14.0
+          image: ghcr.io/zalando/postgres-operator-ui:v1.15.1
           ports:
             - containerPort: 8081
               protocol: "TCP"
@@ -81,8 +81,6 @@ spec:
               ]
             }
           # Exemple of settings to make snapshot view working in the ui when using AWS
-          # - name: WALE_S3_ENDPOINT
-          #   value: https+path://s3.us-east-1.amazonaws.com:443
           # - name: SPILO_S3_BACKUP_PREFIX
           #   value: spilo/
           # - name: AWS_ACCESS_KEY_ID
@@ -102,5 +100,3 @@ spec:
           #       key: AWS_DEFAULT_REGION
           # - name: SPILO_S3_BACKUP_BUCKET
           #   value:
-          # - name: "USE_AWS_INSTANCE_PROFILE"
-          #   value: "true"
diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py
index e02c2995c..bf28df6eb 100644
--- a/ui/operator_ui/main.py
+++ b/ui/operator_ui/main.py
@@ -95,14 +95,6 @@ DEFAULT_MEMORY_LIMIT = getenv('DEFAULT_MEMORY_LIMIT', '300Mi')
 DEFAULT_CPU = getenv('DEFAULT_CPU', '10m')
 DEFAULT_CPU_LIMIT = getenv('DEFAULT_CPU_LIMIT', '300m')

-WALE_S3_ENDPOINT = getenv(
-    'WALE_S3_ENDPOINT',
-    'https+path://s3.eu-central-1.amazonaws.com:443',
-)
-
-USE_AWS_INSTANCE_PROFILE = (
-    getenv('USE_AWS_INSTANCE_PROFILE', 'false').lower() != 'false'
-)

 AWS_ENDPOINT = getenv('AWS_ENDPOINT')

@@ -784,8 +776,6 @@ def get_versions(pg_cluster: str):
             bucket=SPILO_S3_BACKUP_BUCKET,
             pg_cluster=pg_cluster,
             prefix=SPILO_S3_BACKUP_PREFIX,
-            s3_endpoint=WALE_S3_ENDPOINT,
-            use_aws_instance_profile=USE_AWS_INSTANCE_PROFILE,
         ),
     )

@@ -797,9 +787,8 @@ def get_basebackups(pg_cluster: str, uid: str):
             bucket=SPILO_S3_BACKUP_BUCKET,
             pg_cluster=pg_cluster,
             prefix=SPILO_S3_BACKUP_PREFIX,
-            s3_endpoint=WALE_S3_ENDPOINT,
             uid=uid,
-            use_aws_instance_profile=USE_AWS_INSTANCE_PROFILE,
+            postgresql_versions=OPERATOR_UI_CONFIG.get('postgresql_versions', DEFAULT_UI_CONFIG['postgresql_versions']),
         ),
     )

@@ -991,8 +980,6 @@ def main(port, debug, clusters: list):
     logger.info(f'Superuser team: {SUPERUSER_TEAM}')
     logger.info(f'Target namespace: {TARGET_NAMESPACE}')
     logger.info(f'Teamservice URL: {TEAM_SERVICE_URL}')
-    logger.info(f'Use AWS instance_profile: {USE_AWS_INSTANCE_PROFILE}')
-    logger.info(f'WAL-E S3 endpoint: {WALE_S3_ENDPOINT}')
     logger.info(f'AWS S3 endpoint: {AWS_ENDPOINT}')

     if TARGET_NAMESPACE is None:
diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py
index f715430a1..6a2f03bb2 100644
--- a/ui/operator_ui/spiloutils.py
+++ b/ui/operator_ui/spiloutils.py
@@ -6,9 +6,8 @@ from os import environ, getenv
 from requests import Session
 from urllib.parse import urljoin
 from uuid import UUID
-from wal_e.cmd import configure_backup_cxt

-from .utils import Attrs, defaulting, these
+from .utils import defaulting, these
 from operator_ui.adapters.logger import logger

 session = Session()
@@ -284,10 +283,8 @@ def read_stored_clusters(bucket, prefix, delimiter='/'):
 def read_versions(
         pg_cluster,
         bucket,
-        s3_endpoint,
         prefix,
         delimiter='/',
-        use_aws_instance_profile=False,
 ):
     return [
         'base' if uid == 'wal' else uid
@@ -305,35 +302,72 @@ def read_versions(
         if uid == 'wal' or defaulting(lambda: UUID(uid))
     ]

-BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/', '17/']
+def lsn_to_wal_segment_stop(finish_lsn, start_segment, wal_segment_size=16 * 1024 * 1024):
+    timeline = int(start_segment[:8], 16)
+    log_id = finish_lsn >> 32
+    seg_id = (finish_lsn & 0xFFFFFFFF) // wal_segment_size
+    return f"{timeline:08X}{log_id:08X}{seg_id:08X}"
+
+def lsn_to_offset_hex(lsn, wal_segment_size=16 * 1024 * 1024):
+    return f"{lsn % wal_segment_size:08X}"

 def read_basebackups(
         pg_cluster,
         uid,
         bucket,
-        s3_endpoint,
         prefix,
-        delimiter='/',
-        use_aws_instance_profile=False,
+        postgresql_versions,
 ):
-    environ['WALE_S3_ENDPOINT'] = s3_endpoint
     suffix = '' if uid == 'base' else '/' + uid
     backups = []

-    for vp in BACKUP_VERSION_PREFIXES:
+    for vp in postgresql_versions:
+
+        backup_prefix = f'{prefix}{pg_cluster}{suffix}/wal/{vp}/basebackups_005/'
+        logger.info(f"{bucket}/{backup_prefix}")

-        backups = backups + [
-            {
-                key: value
-                for key, value in basebackup.__dict__.items()
-                if isinstance(value, str) or isinstance(value, int)
-            }
-            for basebackup in Attrs.call(
-                f=configure_backup_cxt,
-                aws_instance_profile=use_aws_instance_profile,
-                s3_prefix=f's3://{bucket}/{prefix}{pg_cluster}{suffix}/wal/{vp}',
-            )._backup_list(detail=True)
-        ]
+        paginator = client('s3').get_paginator('list_objects_v2')
+        pages = paginator.paginate(Bucket=bucket, Prefix=backup_prefix)
+
+        for page in pages:
+            for obj in page.get("Contents", []):
+                key = obj["Key"]
+                if not key.endswith("backup_stop_sentinel.json"):
+                    continue
+
+                response = client('s3').get_object(Bucket=bucket, Key=key)
+                backup_info = loads(response["Body"].read().decode("utf-8"))
+                last_modified = response["LastModified"].astimezone(timezone.utc).isoformat()
+
+                backup_name = key.split("/")[-1].replace("_backup_stop_sentinel.json", "")
+                start_seg, start_offset = (backup_name.split("_")[1], backup_name.split("_")[-1]) if "_" in backup_name else (None, None)
+
+                if "LSN" in backup_info and "FinishLSN" in backup_info:
+                    # WAL-G
+                    lsn = backup_info["LSN"]
+                    finish_lsn = backup_info["FinishLSN"]
+                    backups.append({
+                        "expanded_size_bytes": backup_info.get("UncompressedSize"),
+                        "last_modified": last_modified,
+                        "name": backup_name,
+                        "wal_segment_backup_start": start_seg,
+                        "wal_segment_backup_stop": lsn_to_wal_segment_stop(finish_lsn, start_seg),
+                        "wal_segment_offset_backup_start": lsn_to_offset_hex(lsn),
+                        "wal_segment_offset_backup_stop": lsn_to_offset_hex(finish_lsn),
+                    })
+                elif "wal_segment_backup_stop" in backup_info:
+                    # WAL-E
+                    stop_seg = backup_info["wal_segment_backup_stop"]
+                    stop_offset = backup_info["wal_segment_offset_backup_stop"]
+
+                    backups.append({
+                        "expanded_size_bytes": backup_info.get("expanded_size_bytes"),
+                        "last_modified": last_modified,
+                        "name": backup_name,
+                        "wal_segment_backup_start": start_seg,
+                        "wal_segment_backup_stop": stop_seg,
+                        "wal_segment_offset_backup_start": start_offset,
+                        "wal_segment_offset_backup_stop": stop_offset,
+                    })

     return backups
diff --git a/ui/requirements.txt b/ui/requirements.txt
index d3318ceec..eaeafe3c1 100644
--- a/ui/requirements.txt
+++ b/ui/requirements.txt
@@ -9,7 +9,6 @@ jq==1.7.0
 json_delta>=2.0.2
 kubernetes==11.0.0
 python-json-logger==2.0.7
-requests==2.32.2
+requests==2.32.4
 stups-tokens>=1.1.19
-wal_e==1.1.1
-werkzeug==3.0.6
+werkzeug==3.1.4
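A note on the TestUpdateSecretNameConflict test added in pkg/cluster/sync_test.go: Kubernetes object names must be valid DNS-1123 subdomains, so a secret name templated from prepared_owner_user can never be created as-is; presumably the operator derives the same sanitized secret name for both users, which is what the test's "user name mismatch" error guards against. A minimal, self-contained Go sketch (illustration only, not part of the patch) showing the constraint via the apimachinery validation helper:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/validation"
)

func main() {
	// The name derived from the prepared database owner contains an
	// underscore and is rejected, so both users end up competing for
	// the same sanitized secret name.
	for _, name := range []string{
		"prepared-owner-user.acid-test-cluster.credentials",
		"prepared_owner_user.acid-test-cluster.credentials",
	} {
		if errs := validation.IsDNS1123Subdomain(name); len(errs) > 0 {
			fmt.Printf("%s -> invalid: %v\n", name, errs)
		} else {
			fmt.Printf("%s -> valid\n", name)
		}
	}
}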
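A note on the /failover to /switchover change in pkg/util/patroni/patroni.go: Patroni's switchover endpoint accepts an optional scheduled_at field with an ISO 8601 timestamp, which is what the new scheduled_at argument feeds; an empty value should amount to an immediate switchover. A small sketch of the request body the updated Switchover encodes; the pod names and timestamp below are made up:

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	// Mirrors the map encoded in Switchover: leader is the current primary,
	// member the candidate, scheduled_at an optional ISO 8601 timestamp.
	buf := &bytes.Buffer{}
	err := json.NewEncoder(buf).Encode(map[string]string{
		"leader":       "acid-test-cluster-0",
		"member":       "acid-test-cluster-1",
		"scheduled_at": "2025-01-15T22:00:00+00:00",
	})
	if err != nil {
		panic(err)
	}
	// httpPostOrPatch POSTs this to http://<pod IP>:8008/switchover,
	// accepting any 2xx status as success.
	fmt.Print(buf.String())
}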
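The lsn_to_wal_segment_stop and lsn_to_offset_hex helpers added to ui/operator_ui/spiloutils.py derive WAL segment names from WAL-G's numeric LSNs: a segment name is three 8-digit uppercase hex fields (timeline, log id, segment number); the log id is the high 32 bits of the LSN, and the segment number is the low 32 bits divided by the WAL segment size (16 MiB by default). A Go rendering of the same arithmetic, for illustration under those assumptions:

package main

import (
	"fmt"
	"strconv"
)

const walSegmentSize = 16 * 1024 * 1024 // default 16 MiB WAL segments

// lsnToWalSegmentStop mirrors the Python helper: the timeline comes from
// the first 8 hex digits of the start segment, the rest from the LSN.
func lsnToWalSegmentStop(finishLSN uint64, startSegment string) string {
	timeline, _ := strconv.ParseUint(startSegment[:8], 16, 64)
	logID := finishLSN >> 32
	segID := (finishLSN & 0xFFFFFFFF) / walSegmentSize
	return fmt.Sprintf("%08X%08X%08X", timeline, logID, segID)
}

func lsnToOffsetHex(lsn uint64) string {
	return fmt.Sprintf("%08X", lsn%walSegmentSize)
}

func main() {
	// LSN 5/3F000028 = (5 << 32) | 0x3F000028: log id 5, segment number
	// 0x3F000028 / 16 MiB = 0x3F, offset 0x28 within that segment.
	finishLSN := uint64(5)<<32 | 0x3F000028
	fmt.Println(lsnToWalSegmentStop(finishLSN, "00000002000000050000003E")) // 00000002000000050000003F
	fmt.Println(lsnToOffsetHex(finishLSN))                                  // 00000028
}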