diff --git a/.github/workflows/publish_ghcr_image.yaml b/.github/workflows/publish_ghcr_image.yaml index 7633ccc3c..d56ff2f17 100644 --- a/.github/workflows/publish_ghcr_image.yaml +++ b/.github/workflows/publish_ghcr_image.yaml @@ -23,7 +23,7 @@ jobs: - uses: actions/setup-go@v2 with: - go-version: "^1.22.5" + go-version: "^1.23.4" - name: Run unit tests run: make deps mocks test diff --git a/.github/workflows/run_e2e.yaml b/.github/workflows/run_e2e.yaml index df83a31c4..16573046e 100644 --- a/.github/workflows/run_e2e.yaml +++ b/.github/workflows/run_e2e.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v1 - uses: actions/setup-go@v2 with: - go-version: "^1.22.5" + go-version: "^1.23.4" - name: Make dependencies run: make deps mocks - name: Code generation diff --git a/.github/workflows/run_tests.yaml b/.github/workflows/run_tests.yaml index c0e731e5e..db47f6e40 100644 --- a/.github/workflows/run_tests.yaml +++ b/.github/workflows/run_tests.yaml @@ -14,7 +14,7 @@ jobs: - uses: actions/checkout@v2 - uses: actions/setup-go@v2 with: - go-version: "^1.22.5" + go-version: "^1.23.4" - name: Make dependencies run: make deps mocks - name: Compile @@ -22,7 +22,7 @@ jobs: - name: Run unit tests run: go test -race -covermode atomic -coverprofile=coverage.out ./... - name: Convert coverage to lcov - uses: jandelgado/gcov2lcov-action@v1.0.9 + uses: jandelgado/gcov2lcov-action@v1.1.1 - name: Coveralls uses: coverallsapp/github-action@master with: diff --git a/.gitignore b/.gitignore index 66a8103d0..5938db216 100644 --- a/.gitignore +++ b/.gitignore @@ -104,3 +104,5 @@ e2e/tls mocks ui/.npm/ + +.DS_Store diff --git a/Makefile b/Makefile index 5944b6b8f..8fc4b36f6 100644 --- a/Makefile +++ b/Makefile @@ -69,7 +69,7 @@ docker: ${DOCKERDIR}/${DOCKERFILE} docker build --rm -t "$(IMAGE):$(TAG)$(CDP_TAG)$(DEBUG_FRESH)$(DEBUG_POSTFIX)" -f "${DOCKERDIR}/${DOCKERFILE}" --build-arg VERSION="${VERSION}" . indocker-race: - docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.22.5 bash -c "make linux" + docker run --rm -v "${GOPATH}":"${GOPATH}" -e GOPATH="${GOPATH}" -e RACE=1 -w ${PWD} golang:1.23.4 bash -c "make linux" push: docker push "$(IMAGE):$(TAG)$(CDP_TAG)" @@ -78,7 +78,7 @@ mocks: GO111MODULE=on go generate ./... tools: - GO111MODULE=on go get -d k8s.io/client-go@kubernetes-1.30.4 + GO111MODULE=on go get k8s.io/client-go@kubernetes-1.30.4 GO111MODULE=on go install github.com/golang/mock/mockgen@v1.6.0 GO111MODULE=on go mod tidy diff --git a/README.md b/README.md index c34bc6f6f..9493115de 100644 --- a/README.md +++ b/README.md @@ -28,13 +28,13 @@ pipelines with no access to Kubernetes API directly, promoting infrastructure as ### PostgreSQL features -* Supports PostgreSQL 16, starting from 12+ +* Supports PostgreSQL 17, starting from 13+ * Streaming replication cluster via Patroni * Point-In-Time-Recovery with -[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) / +[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) / [WAL-E](https://github.com/wal-e/wal-e) via [Spilo](https://github.com/zalando/spilo) * Preload libraries: [bg_mon](https://github.com/CyberDem0n/bg_mon), -[pg_stat_statements](https://www.postgresql.org/docs/16/pgstatstatements.html), +[pg_stat_statements](https://www.postgresql.org/docs/17/pgstatstatements.html), [pgextwlist](https://github.com/dimitri/pgextwlist), [pg_auth_mon](https://github.com/RafiaSabih/pg_auth_mon) * Incl. 
popular Postgres extensions such as @@ -57,12 +57,12 @@ production for over five years. | Release | Postgres versions | K8s versions | Golang | | :-------- | :---------------: | :---------------: | :-----: | +| v1.14.0 | 13 → 17 | 1.27+ | 1.23.4 | | v1.13.0 | 12 → 16 | 1.27+ | 1.22.5 | | v1.12.0 | 11 → 16 | 1.27+ | 1.22.3 | | v1.11.0 | 11 → 16 | 1.27+ | 1.21.7 | | v1.10.1 | 10 → 15 | 1.21+ | 1.19.8 | | v1.9.0 | 10 → 15 | 1.21+ | 1.18.9 | -| v1.8.2 | 9.5 → 14 | 1.20 → 1.24 | 1.17.4 | ## Getting started diff --git a/charts/postgres-operator-ui/Chart.yaml b/charts/postgres-operator-ui/Chart.yaml index 1d5597940..f4e2adf95 100644 --- a/charts/postgres-operator-ui/Chart.yaml +++ b/charts/postgres-operator-ui/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: postgres-operator-ui -version: 1.13.0 -appVersion: 1.13.0 +version: 1.14.0 +appVersion: 1.14.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience keywords: diff --git a/charts/postgres-operator-ui/index.yaml b/charts/postgres-operator-ui/index.yaml index 1b89eeb60..dab9594e9 100644 --- a/charts/postgres-operator-ui/index.yaml +++ b/charts/postgres-operator-ui/index.yaml @@ -1,9 +1,32 @@ apiVersion: v1 entries: postgres-operator-ui: + - apiVersion: v2 + appVersion: 1.14.0 + created: "2024-12-23T11:26:07.721761867+01:00" + description: Postgres Operator UI provides a graphical interface for a convenient + database-as-a-service user experience + digest: e87ed898079a852957a67a4caf3fbd27b9098e413f5d961b7a771a6ae8b3e17c + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - ui + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator-ui + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-ui-1.14.0.tgz + version: 1.14.0 - apiVersion: v2 appVersion: 1.13.0 - created: "2024-08-21T18:55:36.524305158+02:00" + created: "2024-12-23T11:26:07.719409282+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: e0444e516b50f82002d1a733527813c51759a627cefdd1005cea73659f824ea8 @@ -26,7 +49,7 @@ entries: version: 1.13.0 - apiVersion: v2 appVersion: 1.12.2 - created: "2024-08-21T18:55:36.521875733+02:00" + created: "2024-12-23T11:26:07.717202918+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: cbcef400c23ccece27d97369ad629278265c013e0a45c0b7f33e7568a082fedd @@ -49,7 +72,7 @@ entries: version: 1.12.2 - apiVersion: v2 appVersion: 1.11.0 - created: "2024-08-21T18:55:36.51959105+02:00" + created: "2024-12-23T11:26:07.714792146+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: a45f2284045c2a9a79750a36997386444f39b01ac722b17c84b431457577a3a2 @@ -72,7 +95,7 @@ entries: version: 1.11.0 - apiVersion: v2 appVersion: 1.10.1 - created: "2024-08-21T18:55:36.516518177+02:00" + created: "2024-12-23T11:26:07.712194397+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: 2e5e7a82aebee519ec57c6243eb8735124aa4585a3a19c66ffd69638fbeb11ce @@ -95,7 +118,7 @@ entries: version: 1.10.1 - apiVersion: v2 appVersion: 1.9.0 - created: "2024-08-21T18:55:36.52712908+02:00" + created: 
"2024-12-23T11:26:07.723891496+01:00" description: Postgres Operator UI provides a graphical interface for a convenient database-as-a-service user experience digest: df434af6c8b697fe0631017ecc25e3c79e125361ae6622347cea41a545153bdc @@ -116,4 +139,4 @@ entries: urls: - postgres-operator-ui-1.9.0.tgz version: 1.9.0 -generated: "2024-08-21T18:55:36.512456099+02:00" +generated: "2024-12-23T11:26:07.709192608+01:00" diff --git a/charts/postgres-operator-ui/postgres-operator-ui-1.14.0.tgz b/charts/postgres-operator-ui/postgres-operator-ui-1.14.0.tgz new file mode 100644 index 000000000..8e229d0f5 Binary files /dev/null and b/charts/postgres-operator-ui/postgres-operator-ui-1.14.0.tgz differ diff --git a/charts/postgres-operator-ui/templates/deployment.yaml b/charts/postgres-operator-ui/templates/deployment.yaml index 3161ae0a7..fbb9ee086 100644 --- a/charts/postgres-operator-ui/templates/deployment.yaml +++ b/charts/postgres-operator-ui/templates/deployment.yaml @@ -9,7 +9,7 @@ metadata: name: {{ template "postgres-operator-ui.fullname" . }} namespace: {{ .Release.Namespace }} spec: - replicas: 1 + replicas: {{ .Values.replicaCount }} selector: matchLabels: app.kubernetes.io/name: {{ template "postgres-operator-ui.name" . }} @@ -84,11 +84,11 @@ spec: "limit_iops": 16000, "limit_throughput": 1000, "postgresql_versions": [ + "17", "16", "15", "14", - "13", - "12" + "13" ] } {{- if .Values.extraEnvs }} @@ -102,4 +102,4 @@ spec: {{ toYaml .Values.tolerations | indent 8 }} {{- if .Values.priorityClassName }} priorityClassName: {{ .Values.priorityClassName }} - {{- end }} \ No newline at end of file + {{- end }} diff --git a/charts/postgres-operator-ui/values.yaml b/charts/postgres-operator-ui/values.yaml index 22f787826..da3c4baaf 100644 --- a/charts/postgres-operator-ui/values.yaml +++ b/charts/postgres-operator-ui/values.yaml @@ -8,7 +8,7 @@ replicaCount: 1 image: registry: ghcr.io repository: zalando/postgres-operator-ui - tag: v1.13.0 + tag: v1.14.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. 
diff --git a/charts/postgres-operator/Chart.yaml b/charts/postgres-operator/Chart.yaml index 89b6dd15a..35852c488 100644 --- a/charts/postgres-operator/Chart.yaml +++ b/charts/postgres-operator/Chart.yaml @@ -1,7 +1,7 @@ apiVersion: v2 name: postgres-operator version: 1.14.0 -appVersion: 1.13.0 +appVersion: 1.14.0 home: https://github.com/zalando/postgres-operator description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes keywords: diff --git a/charts/postgres-operator/crds/operatorconfigurations.yaml b/charts/postgres-operator/crds/operatorconfigurations.yaml index d71f1b067..e56730a63 100644 --- a/charts/postgres-operator/crds/operatorconfigurations.yaml +++ b/charts/postgres-operator/crds/operatorconfigurations.yaml @@ -68,7 +68,7 @@ spec: type: string docker_image: type: string - default: "ghcr.io/zalando/spilo-16:3.3-p1" + default: "ghcr.io/zalando/spilo-17:4.0-p2" enable_crd_registration: type: boolean default: true @@ -167,10 +167,10 @@ spec: type: string minimal_major_version: type: string - default: "12" + default: "13" target_major_version: type: string - default: "16" + default: "17" kubernetes: type: object properties: diff --git a/charts/postgres-operator/crds/postgresqls.yaml b/charts/postgres-operator/crds/postgresqls.yaml index ebaf2d1f8..8083e5e1d 100644 --- a/charts/postgres-operator/crds/postgresqls.yaml +++ b/charts/postgres-operator/crds/postgresqls.yaml @@ -375,11 +375,11 @@ spec: version: type: string enum: - - "12" - "13" - "14" - "15" - "16" + - "17" parameters: type: object additionalProperties: @@ -514,6 +514,9 @@ spec: type: string batchSize: type: integer + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' database: type: string enableRecovery: @@ -522,6 +525,9 @@ spec: type: object additionalProperties: type: string + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' tables: type: object additionalProperties: @@ -533,6 +539,8 @@ spec: type: string idColumn: type: string + ignoreRecovery: + type: boolean payloadColumn: type: string recoveryEventType: diff --git a/charts/postgres-operator/index.yaml b/charts/postgres-operator/index.yaml index c72604daa..4da98d70a 100644 --- a/charts/postgres-operator/index.yaml +++ b/charts/postgres-operator/index.yaml @@ -1,9 +1,31 @@ apiVersion: v1 entries: postgres-operator: + - apiVersion: v2 + appVersion: 1.14.0 + created: "2024-12-23T11:25:32.596716566+01:00" + description: Postgres Operator creates and manages PostgreSQL clusters running + in Kubernetes + digest: 36e1571f3f455b213f16cdda7b1158648e8e84deb804ba47ed6b9b6d19263ba8 + home: https://github.com/zalando/postgres-operator + keywords: + - postgres + - operator + - cloud-native + - patroni + - spilo + maintainers: + - email: opensource@zalando.de + name: Zalando + name: postgres-operator + sources: + - https://github.com/zalando/postgres-operator + urls: + - postgres-operator-1.14.0.tgz + version: 1.14.0 - apiVersion: v2 appVersion: 1.13.0 - created: "2024-08-21T18:54:43.160735116+02:00" + created: "2024-12-23T11:25:32.591136261+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: a839601689aea0a7e6bc0712a5244d435683cf3314c95794097ff08540e1dfef @@ -25,7 +47,7 @@ entries: version: 1.13.0 - apiVersion: v2 appVersion: 1.12.2 - created: "2024-08-21T18:54:43.152249286+02:00" + created: "2024-12-23T11:25:32.585419709+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: 
65858d14a40d7fd90c32bd9fc60021acc9555c161079f43a365c70171eaf21d8 @@ -47,7 +69,7 @@ entries: version: 1.12.2 - apiVersion: v2 appVersion: 1.11.0 - created: "2024-08-21T18:54:43.145837894+02:00" + created: "2024-12-23T11:25:32.580077286+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: 3914b5e117bda0834f05c9207f007e2ac372864cf6e86dcc2e1362bbe46c14d9 @@ -69,7 +91,7 @@ entries: version: 1.11.0 - apiVersion: v2 appVersion: 1.10.1 - created: "2024-08-21T18:54:43.139552116+02:00" + created: "2024-12-23T11:25:32.574641578+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: cc3baa41753da92466223d0b334df27e79c882296577b404a8e9071411fcf19c @@ -91,7 +113,7 @@ entries: version: 1.10.1 - apiVersion: v2 appVersion: 1.9.0 - created: "2024-08-21T18:54:43.168490032+02:00" + created: "2024-12-23T11:25:32.604748814+01:00" description: Postgres Operator creates and manages PostgreSQL clusters running in Kubernetes digest: 64df90c898ca591eb3a330328173ffaadfbf9ddd474d8c42ed143edc9e3f4276 @@ -111,4 +133,4 @@ entries: urls: - postgres-operator-1.9.0.tgz version: 1.9.0 -generated: "2024-08-21T18:54:43.126871802+02:00" +generated: "2024-12-23T11:25:32.568598763+01:00" diff --git a/charts/postgres-operator/postgres-operator-1.14.0.tgz b/charts/postgres-operator/postgres-operator-1.14.0.tgz new file mode 100644 index 000000000..df95fd01d Binary files /dev/null and b/charts/postgres-operator/postgres-operator-1.14.0.tgz differ diff --git a/charts/postgres-operator/templates/clusterrole.yaml b/charts/postgres-operator/templates/clusterrole.yaml index 1fd066fa5..ad3b46064 100644 --- a/charts/postgres-operator/templates/clusterrole.yaml +++ b/charts/postgres-operator/templates/clusterrole.yaml @@ -141,7 +141,7 @@ rules: - get - list - patch -{{- if toString .Values.configKubernetes.storage_resize_mode | eq "pvc" }} +{{- if or (toString .Values.configKubernetes.storage_resize_mode | eq "pvc") (toString .Values.configKubernetes.storage_resize_mode | eq "mixed") }} - update {{- end }} # to read existing PVs. Creation should be done via dynamic provisioning diff --git a/charts/postgres-operator/templates/deployment.yaml b/charts/postgres-operator/templates/deployment.yaml index abd66cfc8..395843942 100644 --- a/charts/postgres-operator/templates/deployment.yaml +++ b/charts/postgres-operator/templates/deployment.yaml @@ -54,7 +54,7 @@ spec: value: {{ template "postgres-operator.controllerID" . }} {{- end }} {{- if .Values.extraEnvs }} - {{- .Values.extraEnvs | toYaml | nindent 8 }} +{{ toYaml .Values.extraEnvs | indent 8 }} {{- end }} resources: {{ toYaml .Values.resources | indent 10 }} diff --git a/charts/postgres-operator/values.yaml b/charts/postgres-operator/values.yaml index 27630843c..d2ad03d5f 100644 --- a/charts/postgres-operator/values.yaml +++ b/charts/postgres-operator/values.yaml @@ -1,7 +1,7 @@ image: registry: ghcr.io repository: zalando/postgres-operator - tag: v1.13.0 + tag: v1.14.0 pullPolicy: "IfNotPresent" # Optionally specify an array of imagePullSecrets. @@ -38,7 +38,7 @@ configGeneral: # etcd connection string for Patroni. Empty uses K8s-native DCS. 
etcd_host: "" # Spilo docker image - docker_image: ghcr.io/zalando/spilo-16:3.3-p1 + docker_image: ghcr.io/zalando/spilo-17:4.0-p2 # key name for annotation to ignore globally configured instance limits # ignore_instance_limits_annotation_key: "" @@ -89,9 +89,9 @@ configMajorVersionUpgrade: # - acid # minimal Postgres major version that will not automatically be upgraded - minimal_major_version: "12" + minimal_major_version: "13" # target Postgres major version when upgrading clusters automatically - target_major_version: "16" + target_major_version: "17" configKubernetes: # list of additional capabilities for postgres container diff --git a/cmd/main.go b/cmd/main.go index 0b48ac863..adbf0cce5 100644 --- a/cmd/main.go +++ b/cmd/main.go @@ -35,6 +35,8 @@ func init() { flag.BoolVar(&outOfCluster, "outofcluster", false, "Whether the operator runs in- our outside of the Kubernetes cluster.") flag.BoolVar(&config.NoDatabaseAccess, "nodatabaseaccess", false, "Disable all access to the database from the operator side.") flag.BoolVar(&config.NoTeamsAPI, "noteamsapi", false, "Disable all access to the teams API") + flag.IntVar(&config.KubeQPS, "kubeqps", 10, "Kubernetes api requests per second.") + flag.IntVar(&config.KubeBurst, "kubeburst", 20, "Kubernetes api requests burst limit.") flag.Parse() config.EnableJsonLogging = os.Getenv("ENABLE_JSON_LOGGING") == "true" @@ -83,6 +85,9 @@ func main() { log.Fatalf("couldn't get REST config: %v", err) } + config.RestConfig.QPS = float32(config.KubeQPS) + config.RestConfig.Burst = config.KubeBurst + c := controller.NewController(&config, "") c.Run(stop, wg) diff --git a/docker/DebugDockerfile b/docker/DebugDockerfile index ec1ff6d2f..18cb631fe 100644 --- a/docker/DebugDockerfile +++ b/docker/DebugDockerfile @@ -1,4 +1,4 @@ -FROM golang:1.22-alpine +FROM golang:1.23-alpine LABEL maintainer="Team ACID @ Zalando " # We need root certificates to deal with teams api over https diff --git a/docker/Dockerfile b/docker/Dockerfile index b0808c3bc..1fd2020d8 100644 --- a/docker/Dockerfile +++ b/docker/Dockerfile @@ -1,5 +1,5 @@ ARG BASE_IMAGE=registry.opensource.zalan.do/library/alpine-3:latest -FROM golang:1.22-alpine AS builder +FROM golang:1.23-alpine AS builder ARG VERSION=latest COPY . /go/src/github.com/zalando/postgres-operator diff --git a/docker/build_operator.sh b/docker/build_operator.sh index 2ada63a81..6c1817b1b 100644 --- a/docker/build_operator.sh +++ b/docker/build_operator.sh @@ -13,7 +13,7 @@ apt-get install -y wget ( cd /tmp - wget -q "https://storage.googleapis.com/golang/go1.22.5.linux-${arch}.tar.gz" -O go.tar.gz + wget -q "https://storage.googleapis.com/golang/go1.23.4.linux-${arch}.tar.gz" -O go.tar.gz tar -xf go.tar.gz mv go /usr/local ln -s /usr/local/go/bin/go /usr/bin/go diff --git a/docs/administrator.md b/docs/administrator.md index 86ceca291..55abebc8b 100644 --- a/docs/administrator.md +++ b/docs/administrator.md @@ -63,14 +63,17 @@ the `PGVERSION` environment variable is set for the database pods. Since `v1.6.0` the related option `enable_pgversion_env_var` is enabled by default. In-place major version upgrades can be configured to be executed by the -operator with the `major_version_upgrade_mode` option. By default it is set -to `off` which means the cluster version will not change when increased in -the manifest. Still, a rolling update would be triggered updating the -`PGVERSION` variable. 
But Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py) -script will notice the version mismatch and start the old version again. +operator with the `major_version_upgrade_mode` option. By default, it is +enabled (mode: `manual`). In any case, altering the version in the manifest +will trigger a rolling update of pods to update the `PGVERSION` env variable. +Spilo's [`configure_spilo`](https://github.com/zalando/spilo/blob/master/postgres-appliance/scripts/configure_spilo.py) +script will notice the version mismatch but start the current version again. -In this scenario the major version could then be run by a user from within the -primary pod. Exec into the container and run: +Next, the operator would call an upgrade script inside Spilo. When automatic +upgrades are disabled (mode: `off`) the upgrade could still be run by a user +from within the primary pod. This gives you full control over the point in +time when the upgrade can be started (see also the maintenance windows section below). +Exec into the container and run: ```bash python3 /scripts/inplace_upgrade.py N ``` @@ -79,17 +82,32 @@ The upgrade is usually fast, well under one minute for most DBs. Note, that changes become irrevertible once `pg_upgrade` is called. To understand the upgrade procedure, refer to the [corresponding PR in Spilo](https://github.com/zalando/spilo/pull/488). -When `major_version_upgrade_mode` is set to `manual` the operator will run -the upgrade script for you after the manifest is updated and pods are rotated. -It is also possible to define `maintenanceWindows` in the Postgres manifest to -better control when such automated upgrades should take place after increasing -the version. +When `major_version_upgrade_mode` is set to `full` the operator will compare +the version in the manifest with the configured `minimal_major_version`. If it +is lower, the operator will start an automatic upgrade as described above. The +configured `target_major_version` will be used as the new version. This option +can be useful if you have to get rid of outdated major versions in your fleet. +Please note that the operator does not patch the version in the manifest. +Thus, the `full` mode can create drift between desired and actual state. + +### Upgrade during maintenance windows + +When `maintenanceWindows` are defined in the Postgres manifest the operator +will trigger a major version upgrade only during these periods. Make sure they +are at least twice as long as your configured `resync_period` to guarantee +that operator actions can be triggered. ### Upgrade annotations -When an upgrade is executed, the operator sets an annotation in the PostgreSQL resource, either `last-major-upgrade-success` if the upgrade succeeds, or `last-major-upgrade-failure` if it fails. The value of the annotation is a timestamp indicating when the upgrade occurred. +When an upgrade is executed, the operator sets an annotation in the PostgreSQL +resource, either `last-major-upgrade-success` if the upgrade succeeds, or +`last-major-upgrade-failure` if it fails. The value of the annotation is a +timestamp indicating when the upgrade occurred. -If a PostgreSQL resource contains a failure annotation, the operator will not attempt to retry the upgrade during a sync event. To remove the failure annotation, you can revert the PostgreSQL version back to the current version. This action will trigger the removal of the failure annotation.
+If a PostgreSQL resource contains a failure annotation, the operator will not +attempt to retry the upgrade during a sync event. To remove the failure +annotation, you can revert the PostgreSQL version back to the current version. +This action will trigger the removal of the failure annotation. ## Non-default cluster domain @@ -1279,7 +1297,7 @@ aws_or_gcp: If cluster members have to be (re)initialized restoring physical backups happens automatically either from the backup location or by running -[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html) +[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html) on one of the other running instances (preferably replicas if they do not lag behind). You can test restoring backups by [cloning](user.md#how-to-clone-an-existing-postgresql-cluster) clusters. @@ -1387,6 +1405,10 @@ configuration: volumeMounts: - mountPath: /custom-pgdata-mountpoint name: pgdata + env: + - name: "ENV_VAR_NAME" + value: "any-k8s-env-things" + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] - ... ``` diff --git a/docs/developer.md b/docs/developer.md index 31f48d92d..c006aded0 100644 --- a/docs/developer.md +++ b/docs/developer.md @@ -186,7 +186,7 @@ go get -u github.com/derekparker/delve/cmd/dlv ``` RUN apk --no-cache add go git musl-dev -RUN go get -d github.com/derekparker/delve/cmd/dlv +RUN go get github.com/derekparker/delve/cmd/dlv ``` * Update the `Makefile` to build the project with debugging symbols. For that diff --git a/docs/reference/cluster_manifest.md b/docs/reference/cluster_manifest.md index bf731be2e..8d02ee7d8 100644 --- a/docs/reference/cluster_manifest.md +++ b/docs/reference/cluster_manifest.md @@ -638,7 +638,7 @@ the global configuration before adding the `tls` section'. ## Change data capture streams This sections enables change data capture (CDC) streams via Postgres' -[logical decoding](https://www.postgresql.org/docs/16/logicaldecoding.html) +[logical decoding](https://www.postgresql.org/docs/17/logicaldecoding.html) feature and `pgoutput` plugin. While the Postgres operator takes responsibility for providing the setup to publish change events, it relies on external tools to consume them. At Zalando, we are using a workflow based on @@ -652,11 +652,11 @@ can have the following properties: * **applicationId** The application name to which the database and CDC belongs to. For each - set of streams with a distinct `applicationId` a separate stream CR as well - as a separate logical replication slot will be created. This means there can - be different streams in the same database and streams with the same - `applicationId` are bundled in one stream CR. The stream CR will be called - like the Postgres cluster plus "-" suffix. Required. + set of streams with a distinct `applicationId` a separate stream resource as + well as a separate logical replication slot will be created. This means there + can be different streams in the same database and streams with the same + `applicationId` are bundled in one stream resource. The stream resource will + be called like the Postgres cluster plus "-" suffix. Required. * **database** Name of the database from where events will be published via Postgres' @@ -667,21 +667,37 @@ can have the following properties: * **tables** Defines a map of table names and their properties (`eventType`, `idColumn` - and `payloadColumn`). The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/). 
+ and `payloadColumn`). Required. + The CDC operator is following the [outbox pattern](https://debezium.io/blog/2019/02/19/reliable-microservices-data-exchange-with-the-outbox-pattern/). The application is responsible for putting events into a (JSON/B or VARCHAR) payload column of the outbox table in the structure of the specified target - event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/16/logical-replication-publication.html) + event type. The operator will create a [PUBLICATION](https://www.postgresql.org/docs/17/logical-replication-publication.html) in Postgres for all tables specified for one `database` and `applicationId`. The CDC operator will consume from it shortly after transactions are committed to the outbox table. The `idColumn` will be used in telemetry for the CDC operator. The names for `idColumn` and `payloadColumn` can be configured. Defaults are `id` and `payload`. The target `eventType` has to - be defined. Required. + be defined. One can also specify a `recoveryEventType` that will be used + for a dead letter queue. By enabling `ignoreRecovery`, you can choose to + ignore failing events. * **filter** Streamed events can be filtered by a jsonpath expression for each table. Optional. +* **enableRecovery** + Flag to enable a dead letter queue recovery for all streams tables. + Alternatively, recovery can also be enabled for single outbox tables by only + specifying a `recoveryEventType` and no `enableRecovery` flag. When set to + false or missing, events will be retried until consumption succeeds. You can + use a `filter` expression to get rid of poison pills. Optional. + * **batchSize** Defines the size of batches in which events are consumed. Optional. Defaults to 1. + +* **cpu** + CPU requests to be set as an annotation on the stream resource. Optional. + +* **memory** + Memory requests to be set as an annotation on the stream resource. Optional. diff --git a/docs/reference/operator_parameters.md b/docs/reference/operator_parameters.md index ae8b164a1..38cbf62a4 100644 --- a/docs/reference/operator_parameters.md +++ b/docs/reference/operator_parameters.md @@ -94,9 +94,6 @@ Those are top-level keys, containing both leaf keys and groups. * **enable_pgversion_env_var** With newer versions of Spilo, it is preferable to use `PGVERSION` pod environment variable instead of the setting `postgresql.bin_dir` in the `SPILO_CONFIGURATION` env variable. When this option is true, the operator sets `PGVERSION` and omits `postgresql.bin_dir` from `SPILO_CONFIGURATION`. When false, the `postgresql.bin_dir` is set. This setting takes precedence over `PGVERSION`; see PR 222 in Spilo. The default is `true`. -* **enable_spilo_wal_path_compat** - enables backwards compatible path between Spilo 12 and Spilo 13+ images. The default is `false`. - * **enable_team_id_clustername_prefix** To lower the risk of name clashes between clusters of different teams you can turn on this flag and the operator will sync only clusters where the @@ -250,12 +247,12 @@ CRD-configuration, they are grouped under the `major_version_upgrade` key. * **minimal_major_version** The minimal Postgres major version that will not automatically be upgraded - when `major_version_upgrade_mode` is set to `"full"`. The default is `"12"`. + when `major_version_upgrade_mode` is set to `"full"`. The default is `"13"`.
* **target_major_version** The target Postgres major version when upgrading clusters automatically which violate the configured allowed `minimal_major_version` when - `major_version_upgrade_mode` is set to `"full"`. The default is `"16"`. + `major_version_upgrade_mode` is set to `"full"`. The default is `"17"`. ## Kubernetes resources diff --git a/docs/user.md b/docs/user.md index 78b30dfe9..c63e43f57 100644 --- a/docs/user.md +++ b/docs/user.md @@ -30,7 +30,7 @@ spec: databases: foo: zalando postgresql: - version: "16" + version: "17" ``` Once you cloned the Postgres Operator [repository](https://github.com/zalando/postgres-operator) @@ -109,7 +109,7 @@ metadata: spec: [...] postgresql: - version: "16" + version: "17" parameters: password_encryption: scram-sha-256 ``` @@ -517,7 +517,7 @@ Postgres Operator will create the following NOLOGIN roles: The `_owner` role is the database owner and should be used when creating new database objects. All members of the `admin` role, e.g. teams API roles, can -become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/16/sql-alterdefaultprivileges.html) +become the owner with the `SET ROLE` command. [Default privileges](https://www.postgresql.org/docs/17/sql-alterdefaultprivileges.html) are configured for the owner role so that the `_reader` role automatically gets read-access (SELECT) to new tables and sequences and the `_writer` receives write-access (INSERT, UPDATE, DELETE on tables, @@ -594,7 +594,7 @@ spec: ### Schema `search_path` for default roles -The schema [`search_path`](https://www.postgresql.org/docs/16/ddl-schemas.html#DDL-SCHEMAS-PATH) +The schema [`search_path`](https://www.postgresql.org/docs/17/ddl-schemas.html#DDL-SCHEMAS-PATH) for each role will include the role name and the schemas, this role should have access to. So `foo_bar_writer` does not have to schema-qualify tables from schemas `foo_bar_writer, bar`, while `foo_writer` can look up `foo_writer` and @@ -695,7 +695,7 @@ handle it. ### HugePages support -The operator supports [HugePages](https://www.postgresql.org/docs/16/kernel-resources.html#LINUX-HUGEPAGES). +The operator supports [HugePages](https://www.postgresql.org/docs/17/kernel-resources.html#LINUX-HUGEPAGES). To enable HugePages, set the matching resource requests and/or limits in the manifest: ```yaml @@ -838,7 +838,7 @@ spec: ### Clone directly Another way to get a fresh copy of your source DB cluster is via -[pg_basebackup](https://www.postgresql.org/docs/16/app-pgbasebackup.html). To +[pg_basebackup](https://www.postgresql.org/docs/17/app-pgbasebackup.html). To use this feature simply leave out the timestamp field from the clone section. The operator will connect to the service of the source cluster by name. If the cluster is called test, then the connection string will look like host=test @@ -1005,6 +1005,7 @@ spec: env: - name: "ENV_VAR_NAME" value: "any-k8s-env-things" + command: ['sh', '-c', 'echo "logging" > /opt/logs.txt'] ``` In addition to any environment variables you specify, the following environment diff --git a/e2e/Makefile b/e2e/Makefile index 8e200dab1..52d24e9e5 100644 --- a/e2e/Makefile +++ b/e2e/Makefile @@ -46,7 +46,7 @@ tools: # install pinned version of 'kind' # go install must run outside of a dir with a (module-based) Go project ! 
# otherwise go install updates project's dependencies and/or behaves differently - cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.23.0 + cd "/tmp" && GO111MODULE=on go install sigs.k8s.io/kind@v0.24.0 e2etest: tools copy clean ./run.sh main diff --git a/e2e/run.sh b/e2e/run.sh index 1adca479d..d289cb3f4 100755 --- a/e2e/run.sh +++ b/e2e/run.sh @@ -8,7 +8,7 @@ IFS=$'\n\t' readonly cluster_name="postgres-operator-e2e-tests" readonly kubeconfig_path="/tmp/kind-config-${cluster_name}" -readonly spilo_image="registry.opensource.zalan.do/acid/spilo-16-e2e:0.1" +readonly spilo_image="registry.opensource.zalan.do/acid/spilo-17-e2e:0.3" readonly e2e_test_runner_image="registry.opensource.zalan.do/acid/postgres-operator-e2e-tests-runner:0.4" export GOPATH=${GOPATH-~/go} diff --git a/e2e/tests/test_e2e.py b/e2e/tests/test_e2e.py index 83c52d86d..ba9cd6c8a 100644 --- a/e2e/tests/test_e2e.py +++ b/e2e/tests/test_e2e.py @@ -1198,35 +1198,35 @@ class EndToEndTestCase(unittest.TestCase): k8s = self.k8s cluster_label = 'application=spilo,cluster-name=acid-upgrade-test' - with open("manifests/minimal-postgres-manifest-12.yaml", 'r+') as f: + with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'r+') as f: upgrade_manifest = yaml.safe_load(f) upgrade_manifest["spec"]["dockerImage"] = SPILO_FULL_IMAGE - with open("manifests/minimal-postgres-manifest-12.yaml", 'w') as f: + with open("manifests/minimal-postgres-lowest-version-manifest.yaml", 'w') as f: yaml.dump(upgrade_manifest, f, Dumper=yaml.Dumper) - k8s.create_with_kubectl("manifests/minimal-postgres-manifest-12.yaml") + k8s.create_with_kubectl("manifests/minimal-postgres-lowest-version-manifest.yaml") self.eventuallyEqual(lambda: k8s.count_running_pods(labels=cluster_label), 2, "No 2 pods running") self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") - self.eventuallyEqual(check_version, 12, "Version is not correct") + self.eventuallyEqual(check_version, 13, "Version is not correct") master_nodes, _ = k8s.get_cluster_nodes(cluster_labels=cluster_label) # should upgrade immediately - pg_patch_version_13 = { + pg_patch_version_14 = { "spec": { "postgresql": { - "version": "13" + "version": "14" } } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_13) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, 13, "Version should be upgraded from 12 to 13") + self.eventuallyEqual(check_version, 14, "Version should be upgraded from 13 to 14") # check if annotation for last upgrade's success is set annotations = get_annotations() @@ -1235,10 +1235,10 @@ class EndToEndTestCase(unittest.TestCase): # should not upgrade because current time is not in maintenanceWindow current_time = datetime.now() maintenance_window_future = f"{(current_time+timedelta(minutes=60)).strftime('%H:%M')}-{(current_time+timedelta(minutes=120)).strftime('%H:%M')}" - pg_patch_version_14 = { + pg_patch_version_15 = { "spec": { "postgresql": { - "version": "14" + "version": "15" }, "maintenanceWindows": [ 
maintenance_window_future @@ -1246,23 +1246,23 @@ class EndToEndTestCase(unittest.TestCase): } } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_14) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, 13, "Version should not be upgraded") + self.eventuallyEqual(check_version, 14, "Version should not be upgraded") second_annotations = get_annotations() self.assertIsNone(second_annotations.get("last-major-upgrade-failure"), "Annotation for last upgrade's failure should not be set") # change the version again to trigger operator sync maintenance_window_current = f"{(current_time-timedelta(minutes=30)).strftime('%H:%M')}-{(current_time+timedelta(minutes=30)).strftime('%H:%M')}" - pg_patch_version_15 = { + pg_patch_version_16 = { "spec": { "postgresql": { - "version": "15" + "version": "16" }, "maintenanceWindows": [ maintenance_window_current @@ -1271,13 +1271,13 @@ class EndToEndTestCase(unittest.TestCase): } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_15) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role=replica,' + cluster_label) k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, 15, "Version should be upgraded from 13 to 15") + self.eventuallyEqual(check_version, 16, "Version should be upgraded from 14 to 16") # check if annotation for last upgrade's success is updated after second upgrade third_annotations = get_annotations() @@ -1285,7 +1285,7 @@ class EndToEndTestCase(unittest.TestCase): self.assertNotEqual(annotations.get("last-major-upgrade-success"), third_annotations.get("last-major-upgrade-success"), "Annotation for last upgrade's success is not updated") # test upgrade with failed upgrade annotation - pg_patch_version_16 = { + pg_patch_version_17 = { "metadata": { "annotations": { "last-major-upgrade-failure": "2024-01-02T15:04:05Z" @@ -1293,18 +1293,18 @@ class EndToEndTestCase(unittest.TestCase): }, "spec": { "postgresql": { - "version": "16" + "version": "17" }, }, } k8s.api.custom_objects_api.patch_namespaced_custom_object( - "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_16) + "acid.zalan.do", "v1", "default", "postgresqls", "acid-upgrade-test", pg_patch_version_17) self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync") k8s.wait_for_pod_failover(master_nodes, 'spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role={},'.format(LEADER_LABEL_VALUE) + cluster_label) k8s.wait_for_pod_start('spilo-role=replica,' + cluster_label) - self.eventuallyEqual(check_version, 15, "Version should not be upgraded because 
annotation for last upgrade's failure is set") + self.eventuallyEqual(check_version, 16, "Version should not be upgraded because annotation for last upgrade's failure is set") # change the version back to 15 and should remove failure annotation k8s.api.custom_objects_api.patch_namespaced_custom_object( @@ -2201,6 +2201,8 @@ class EndToEndTestCase(unittest.TestCase): { "applicationId": "test-app", "batchSize": 100, + "cpu": "100m", + "memory": "200Mi", "database": "foo", "enableRecovery": True, "tables": { @@ -2222,7 +2224,7 @@ class EndToEndTestCase(unittest.TestCase): "eventType": "test-event", "idColumn": "id", "payloadColumn": "payload", - "recoveryEventType": "test-event-dlq" + "ignoreRecovery": True } } } diff --git a/go.mod b/go.mod index d6390f45f..9c0125229 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/zalando/postgres-operator -go 1.22.0 +go 1.23.4 require ( github.com/aws/aws-sdk-go v1.53.8 @@ -11,7 +11,7 @@ require ( github.com/r3labs/diff v1.1.0 github.com/sirupsen/logrus v1.9.3 github.com/stretchr/testify v1.9.0 - golang.org/x/crypto v0.26.0 + golang.org/x/crypto v0.31.0 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 gopkg.in/yaml.v2 v2.4.0 k8s.io/api v0.30.4 @@ -54,10 +54,10 @@ require ( golang.org/x/mod v0.17.0 // indirect golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.10.0 // indirect - golang.org/x/sync v0.8.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sync v0.10.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d // indirect google.golang.org/appengine v1.6.7 // indirect diff --git a/go.sum b/go.sum index c7992fea0..0e55f2dd7 100644 --- a/go.sum +++ b/go.sum @@ -119,8 +119,8 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1 golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -142,8 +142,8 @@ golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= 
+golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= +golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -151,16 +151,16 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/kubectl-pg/go.mod b/kubectl-pg/go.mod index 67c83354b..9b2e1bbc5 100644 --- a/kubectl-pg/go.mod +++ b/kubectl-pg/go.mod @@ -1,6 +1,6 @@ module github.com/zalando/postgres-operator/kubectl-pg -go 1.22.0 +go 1.23.4 require ( github.com/spf13/cobra v1.8.1 @@ -51,13 +51,13 @@ require ( github.com/spf13/pflag v1.0.5 // indirect github.com/subosito/gotenv v1.6.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/crypto v0.26.0 // indirect + golang.org/x/crypto v0.31.0 // indirect golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 // indirect golang.org/x/net v0.25.0 // indirect golang.org/x/oauth2 v0.18.0 // indirect - golang.org/x/sys v0.23.0 // indirect - golang.org/x/term v0.23.0 // indirect - golang.org/x/text v0.17.0 // indirect + golang.org/x/sys v0.28.0 // indirect + golang.org/x/term v0.27.0 // indirect + golang.org/x/text v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/appengine v1.6.8 // indirect google.golang.org/protobuf v1.33.0 // indirect diff --git a/kubectl-pg/go.sum b/kubectl-pg/go.sum index c873d0e37..2237a9e03 100644 
--- a/kubectl-pg/go.sum +++ b/kubectl-pg/go.sum @@ -137,8 +137,8 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.26.0 h1:RrRspgV4mU+YwB4FYnuBoKsUapNIL5cohGAmSH3azsw= -golang.org/x/crypto v0.26.0/go.mod h1:GY7jblb9wI+FOo5y8/S2oY4zWP07AkOJ4+jxCqdqn54= +golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= +golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o= golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= @@ -166,18 +166,18 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= -golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= +golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= -golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= +golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= +golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= -golang.org/x/text v0.17.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= +golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/logical-backup/Dockerfile b/logical-backup/Dockerfile index 8770e5e1a..137f4efa8 100644 --- a/logical-backup/Dockerfile +++ b/logical-backup/Dockerfile @@ -25,11 +25,11 @@ RUN apt-get update \ && curl --silent https://www.postgresql.org/media/keys/ACCC4CF8.asc | apt-key add - \ && apt-get 
update \ && apt-get install --no-install-recommends -y \ + postgresql-client-17 \ postgresql-client-16 \ postgresql-client-15 \ postgresql-client-14 \ postgresql-client-13 \ - postgresql-client-12 \ && apt-get clean \ && rm -rf /var/lib/apt/lists/* diff --git a/manifests/complete-postgres-manifest.yaml b/manifests/complete-postgres-manifest.yaml index 0b3dc4aa7..44d317123 100644 --- a/manifests/complete-postgres-manifest.yaml +++ b/manifests/complete-postgres-manifest.yaml @@ -10,7 +10,7 @@ metadata: # "delete-date": "2020-08-31" # can only be deleted on that day if "delete-date "key is configured # "delete-clustername": "acid-test-cluster" # can only be deleted when name matches if "delete-clustername" key is configured spec: - dockerImage: ghcr.io/zalando/spilo-16:3.3-p1 + dockerImage: ghcr.io/zalando/spilo-17:4.0-p2 teamId: "acid" numberOfInstances: 2 users: # Application/Robot users @@ -48,7 +48,7 @@ spec: defaultRoles: true defaultUsers: false postgresql: - version: "16" + version: "17" parameters: # Expert section shared_buffers: "32MB" max_connections: "10" diff --git a/manifests/configmap.yaml b/manifests/configmap.yaml index 70e3328f2..211949a1e 100644 --- a/manifests/configmap.yaml +++ b/manifests/configmap.yaml @@ -34,7 +34,7 @@ data: default_memory_request: 100Mi # delete_annotation_date_key: delete-date # delete_annotation_name_key: delete-clustername - docker_image: ghcr.io/zalando/spilo-16:3.3-p1 + docker_image: ghcr.io/zalando/spilo-17:4.0-p2 # downscaler_annotations: "deployment-time,downscaler/*" enable_admin_role_for_users: "true" enable_crd_registration: "true" @@ -86,7 +86,7 @@ data: # logical_backup_cpu_limit: "" # logical_backup_cpu_request: "" logical_backup_cronjob_environment_secret: "" - logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" # logical_backup_google_application_credentials: "" logical_backup_job_prefix: "logical-backup-" # logical_backup_memory_limit: "" @@ -112,7 +112,7 @@ data: min_cpu_limit: 250m min_instances: "-1" min_memory_limit: 250Mi - minimal_major_version: "12" + minimal_major_version: "13" # node_readiness_label: "status:ready" # node_readiness_label_merge: "OR" oauth_token_secret_name: postgresql-operator @@ -163,7 +163,7 @@ data: spilo_privileged: "false" storage_resize_mode: "pvc" super_username: postgres - target_major_version: "16" + target_major_version: "17" team_admin_role: "admin" team_api_role_configuration: "log_statement:all" teams_api_url: http://fake-teams-api.default.svc.cluster.local diff --git a/manifests/minimal-master-replica-svcmonitor.yaml b/manifests/minimal-master-replica-svcmonitor.yaml index 67ed28c81..049ea12eb 100644 --- a/manifests/minimal-master-replica-svcmonitor.yaml +++ b/manifests/minimal-master-replica-svcmonitor.yaml @@ -31,11 +31,21 @@ spec: version: "13" sidecars: - name: "exporter" - image: "wrouesnel/postgres_exporter" + image: "quay.io/prometheuscommunity/postgres-exporter:v0.15.0" ports: - name: exporter containerPort: 9187 protocol: TCP + env: + - name: DATA_SOURCE_URI + value: ":5432/?sslmode=disable" + - name: DATA_SOURCE_USER + value: "postgres" + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: postgres.test-pg.credentials.postgresql.acid.zalan.do + key: password resources: limits: cpu: 500m diff --git a/manifests/minimal-postgres-manifest-12.yaml b/manifests/minimal-postgres-lowest-version-manifest.yaml similarity index 95% rename from 
manifests/minimal-postgres-manifest-12.yaml rename to manifests/minimal-postgres-lowest-version-manifest.yaml index d578ac46d..40abf0c9c 100644 --- a/manifests/minimal-postgres-manifest-12.yaml +++ b/manifests/minimal-postgres-lowest-version-manifest.yaml @@ -17,4 +17,4 @@ spec: preparedDatabases: bar: {} postgresql: - version: "12" + version: "13" diff --git a/manifests/minimal-postgres-manifest.yaml b/manifests/minimal-postgres-manifest.yaml index d22327905..8b1ed275d 100644 --- a/manifests/minimal-postgres-manifest.yaml +++ b/manifests/minimal-postgres-manifest.yaml @@ -17,4 +17,4 @@ spec: preparedDatabases: bar: {} postgresql: - version: "16" + version: "17" diff --git a/manifests/operatorconfiguration.crd.yaml b/manifests/operatorconfiguration.crd.yaml index 996729696..85f13c973 100644 --- a/manifests/operatorconfiguration.crd.yaml +++ b/manifests/operatorconfiguration.crd.yaml @@ -66,7 +66,7 @@ spec: type: string docker_image: type: string - default: "ghcr.io/zalando/spilo-16:3.3-p1" + default: "ghcr.io/zalando/spilo-17:4.0-p2" enable_crd_registration: type: boolean default: true @@ -165,10 +165,10 @@ spec: type: string minimal_major_version: type: string - default: "12" + default: "13" target_major_version: type: string - default: "16" + default: "17" kubernetes: type: object properties: @@ -511,7 +511,7 @@ spec: pattern: '^(\d+m|\d+(\.\d{1,3})?)$' logical_backup_docker_image: type: string - default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" + default: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" logical_backup_google_application_credentials: type: string logical_backup_job_prefix: diff --git a/manifests/postgres-operator.yaml b/manifests/postgres-operator.yaml index fbba84c7f..e3f77657e 100644 --- a/manifests/postgres-operator.yaml +++ b/manifests/postgres-operator.yaml @@ -19,7 +19,7 @@ spec: serviceAccountName: postgres-operator containers: - name: postgres-operator - image: ghcr.io/zalando/postgres-operator:v1.13.0 + image: ghcr.io/zalando/postgres-operator:v1.14.0 imagePullPolicy: IfNotPresent resources: requests: diff --git a/manifests/postgresql-operator-default-configuration.yaml b/manifests/postgresql-operator-default-configuration.yaml index d67132dab..5a6006b7c 100644 --- a/manifests/postgresql-operator-default-configuration.yaml +++ b/manifests/postgresql-operator-default-configuration.yaml @@ -3,7 +3,7 @@ kind: OperatorConfiguration metadata: name: postgresql-operator-default-configuration configuration: - docker_image: ghcr.io/zalando/spilo-16:3.3-p1 + docker_image: ghcr.io/zalando/spilo-17:4.0-p2 # enable_crd_registration: true # crd_categories: # - all @@ -39,8 +39,8 @@ configuration: major_version_upgrade_mode: "manual" # major_version_upgrade_team_allow_list: # - acid - minimal_major_version: "12" - target_major_version: "16" + minimal_major_version: "13" + target_major_version: "17" kubernetes: # additional_pod_capabilities: # - "SYS_NICE" @@ -169,7 +169,7 @@ configuration: # logical_backup_cpu_request: "" # logical_backup_memory_limit: "" # logical_backup_memory_request: "" - logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0" + logical_backup_docker_image: "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0" # logical_backup_google_application_credentials: "" logical_backup_job_prefix: "logical-backup-" logical_backup_provider: "s3" diff --git a/manifests/postgresql.crd.yaml b/manifests/postgresql.crd.yaml index 9207c83d4..39d751cef 100644 --- a/manifests/postgresql.crd.yaml +++ 
b/manifests/postgresql.crd.yaml @@ -373,11 +373,11 @@ spec: version: type: string enum: - - "12" - "13" - "14" - "15" - "16" + - "17" parameters: type: object additionalProperties: @@ -512,6 +512,9 @@ spec: type: string batchSize: type: integer + cpu: + type: string + pattern: '^(\d+m|\d+(\.\d{1,3})?)$' database: type: string enableRecovery: @@ -520,6 +523,9 @@ spec: type: object additionalProperties: type: string + memory: + type: string + pattern: '^(\d+(e\d+)?|\d+(\.\d+)?(e\d+)?[EPTGMK]i?)$' tables: type: object additionalProperties: @@ -531,6 +537,8 @@ spec: type: string idColumn: type: string + ignoreRecovery: + type: boolean payloadColumn: type: string recoveryEventType: diff --git a/manifests/standby-manifest.yaml b/manifests/standby-manifest.yaml index aece29dae..eb90464a6 100644 --- a/manifests/standby-manifest.yaml +++ b/manifests/standby-manifest.yaml @@ -8,7 +8,7 @@ spec: size: 1Gi numberOfInstances: 1 postgresql: - version: "16" + version: "17" # Make this a standby cluster and provide either the s3 bucket path of source cluster or the remote primary host for continuous streaming. standby: # s3_wal_path: "s3://mybucket/spilo/acid-minimal-cluster/abcd1234-2a4b-4b2a-8c9c-c1234defg567/wal/14/" diff --git a/pkg/apis/acid.zalan.do/v1/crds.go b/pkg/apis/acid.zalan.do/v1/crds.go index 01b38d97b..0728e4dec 100644 --- a/pkg/apis/acid.zalan.do/v1/crds.go +++ b/pkg/apis/acid.zalan.do/v1/crds.go @@ -595,9 +595,6 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ "version": { Type: "string", Enum: []apiextv1.JSON{ - { - Raw: []byte(`"12"`), - }, { Raw: []byte(`"13"`), }, @@ -610,6 +607,9 @@ var PostgresCRDResourceValidation = apiextv1.CustomResourceValidation{ { Raw: []byte(`"16"`), }, + { + Raw: []byte(`"17"`), + }, }, }, "parameters": { @@ -1164,7 +1164,8 @@ var OperatorConfigCRDResourceValidation = apiextv1.CustomResourceValidation{ Type: "boolean", }, "enable_spilo_wal_path_compat": { - Type: "boolean", + Type: "boolean", + Description: "deprecated", }, "enable_team_id_clustername_prefix": { Type: "boolean", diff --git a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go index a0b0ada1b..8789bfc48 100644 --- a/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go +++ b/pkg/apis/acid.zalan.do/v1/operator_configuration_type.go @@ -49,8 +49,8 @@ type PostgresUsersConfiguration struct { type MajorVersionUpgradeConfiguration struct { MajorVersionUpgradeMode string `json:"major_version_upgrade_mode" default:"manual"` // off - no actions, manual - manifest triggers action, full - manifest and minimal version violation trigger upgrade MajorVersionUpgradeTeamAllowList []string `json:"major_version_upgrade_team_allow_list,omitempty"` - MinimalMajorVersion string `json:"minimal_major_version" default:"12"` - TargetMajorVersion string `json:"target_major_version" default:"16"` + MinimalMajorVersion string `json:"minimal_major_version" default:"13"` + TargetMajorVersion string `json:"target_major_version" default:"17"` } // KubernetesMetaConfiguration defines k8s conf required for all Postgres clusters and the operator itself diff --git a/pkg/apis/acid.zalan.do/v1/postgresql_type.go b/pkg/apis/acid.zalan.do/v1/postgresql_type.go index 3d731743f..ef6dfe7ff 100644 --- a/pkg/apis/acid.zalan.do/v1/postgresql_type.go +++ b/pkg/apis/acid.zalan.do/v1/postgresql_type.go @@ -220,6 +220,7 @@ type Sidecar struct { DockerImage string `json:"image,omitempty"` Ports []v1.ContainerPort `json:"ports,omitempty"` Env 
[]v1.EnvVar `json:"env,omitempty"` + Command []string `json:"command,omitempty"` } // UserFlags defines flags (such as superuser, nologin) that could be assigned to individual users @@ -258,6 +259,8 @@ type Stream struct { Tables map[string]StreamTable `json:"tables"` Filter map[string]*string `json:"filter,omitempty"` BatchSize *uint32 `json:"batchSize,omitempty"` + CPU *string `json:"cpu,omitempty"` + Memory *string `json:"memory,omitempty"` EnableRecovery *bool `json:"enableRecovery,omitempty"` } @@ -265,6 +268,7 @@ type Stream struct { type StreamTable struct { EventType string `json:"eventType"` RecoveryEventType string `json:"recoveryEventType,omitempty"` + IgnoreRecovery *bool `json:"ignoreRecovery,omitempty"` IdColumn *string `json:"idColumn,omitempty"` PayloadColumn *string `json:"payloadColumn,omitempty"` } diff --git a/pkg/apis/acid.zalan.do/v1/util_test.go b/pkg/apis/acid.zalan.do/v1/util_test.go index bef6cc3ec..5e4913ffe 100644 --- a/pkg/apis/acid.zalan.do/v1/util_test.go +++ b/pkg/apis/acid.zalan.do/v1/util_test.go @@ -219,7 +219,7 @@ var unmarshalCluster = []struct { "127.0.0.1/32" ], "postgresql": { - "version": "16", + "version": "17", "parameters": { "shared_buffers": "32MB", "max_connections": "10", @@ -279,7 +279,7 @@ var unmarshalCluster = []struct { }, Spec: PostgresSpec{ PostgresqlParam: PostgresqlParam{ - PgVersion: "16", + PgVersion: "17", Parameters: map[string]string{ "shared_buffers": "32MB", "max_connections": "10", @@ -339,7 +339,7 @@ var unmarshalCluster = []struct { }, Error: "", }, - marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"16","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), + marshal: []byte(`{"kind":"Postgresql","apiVersion":"acid.zalan.do/v1","metadata":{"name":"acid-testcluster1","creationTimestamp":null},"spec":{"postgresql":{"version":"17","parameters":{"log_statement":"all","max_connections":"10","shared_buffers":"32MB"}},"pod_priority_class_name":"spilo-pod-priority","volume":{"size":"5Gi","storageClass":"SSD", "subPath": "subdir"},"enableShmVolume":false,"patroni":{"initdb":{"data-checksums":"true","encoding":"UTF8","locale":"en_US.UTF-8"},"pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 
md5"],"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}}},"resources":{"requests":{"cpu":"10m","memory":"50Mi"},"limits":{"cpu":"300m","memory":"3000Mi"}},"teamId":"acid","allowedSourceRanges":["127.0.0.1/32"],"numberOfInstances":2,"users":{"zalando":["superuser","createdb"]},"maintenanceWindows":["Mon:01:00-06:00","Sat:00:00-04:00","05:00-05:15"],"clone":{"cluster":"acid-batman"}},"status":{"PostgresClusterStatus":""}}`), err: nil}, { about: "example with clone", @@ -404,7 +404,7 @@ var postgresqlList = []struct { out PostgresqlList err error }{ - {"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"16"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), + {"expect success", []byte(`{"apiVersion":"v1","items":[{"apiVersion":"acid.zalan.do/v1","kind":"Postgresql","metadata":{"labels":{"team":"acid"},"name":"acid-testcluster42","namespace":"default","resourceVersion":"30446957","selfLink":"/apis/acid.zalan.do/v1/namespaces/default/postgresqls/acid-testcluster42","uid":"857cd208-33dc-11e7-b20a-0699041e4b03"},"spec":{"allowedSourceRanges":["185.85.220.0/22"],"numberOfInstances":1,"postgresql":{"version":"17"},"teamId":"acid","volume":{"size":"10Gi"}},"status":{"PostgresClusterStatus":"Running"}}],"kind":"List","metadata":{},"resourceVersion":"","selfLink":""}`), PostgresqlList{ TypeMeta: metav1.TypeMeta{ Kind: "List", @@ -425,7 +425,7 @@ var postgresqlList = []struct { }, Spec: PostgresSpec{ ClusterName: "testcluster42", - PostgresqlParam: PostgresqlParam{PgVersion: "16"}, + PostgresqlParam: PostgresqlParam{PgVersion: "17"}, Volume: Volume{Size: "10Gi"}, TeamID: "acid", AllowedSourceRanges: []string{"185.85.220.0/22"}, diff --git a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go index 557f8889c..ec2d359c8 100644 --- a/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go +++ b/pkg/apis/acid.zalan.do/v1/zz_generated.deepcopy.go @@ -1277,6 +1277,11 @@ func (in *Sidecar) DeepCopyInto(out *Sidecar) { (*in)[i].DeepCopyInto(&(*out)[i]) } } + if in.Command != nil { + in, out := &in.Command, &out.Command + *out = make([]string, len(*in)) + copy(*out, *in) + } return } @@ -1336,6 +1341,16 @@ func (in *Stream) DeepCopyInto(out *Stream) { *out = new(uint32) **out = **in } + if in.CPU != nil { + in, out := &in.CPU, &out.CPU + *out = new(string) + **out = **in + } + if in.Memory != nil { + in, out := &in.Memory, &out.Memory + *out = new(string) + **out = **in + } if in.EnableRecovery != nil { in, out := &in.EnableRecovery, &out.EnableRecovery *out = new(bool) @@ -1357,6 +1372,11 @@ func (in *Stream) DeepCopy() *Stream { // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *StreamTable) DeepCopyInto(out *StreamTable) { *out = *in + if in.IgnoreRecovery != nil { + in, out := &in.IgnoreRecovery, &out.IgnoreRecovery + *out = new(bool) + **out = **in + } if in.IdColumn != nil { in, out := &in.IdColumn, &out.IdColumn *out = new(string) diff --git a/pkg/cluster/k8sres.go b/pkg/cluster/k8sres.go index 2a19b438d..c561c2518 100644 --- a/pkg/cluster/k8sres.go +++ b/pkg/cluster/k8sres.go @@ -1230,6 +1230,7 @@ func getSidecarContainer(sidecar acidv1.Sidecar, index int, resources *v1.Resour Resources: *resources, Env: sidecar.Env, Ports: sidecar.Ports, + Command: sidecar.Command, } } diff --git a/pkg/cluster/k8sres_test.go b/pkg/cluster/k8sres_test.go index 2e5a2973d..b9647cbf9 100644 --- a/pkg/cluster/k8sres_test.go +++ b/pkg/cluster/k8sres_test.go @@ -72,18 +72,18 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { }{ { subtest: "Patroni default configuration", - pgParam: &acidv1.PostgresqlParam{PgVersion: "16"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{}, opConfig: &config.Config{ Auth: config.Auth{ PamRoleName: "zalandos", }, }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{}}}`, }, { subtest: "Patroni configured", - pgParam: &acidv1.PostgresqlParam{PgVersion: "16"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{ InitDB: map[string]string{ "encoding": "UTF8", @@ -102,38 +102,38 @@ func TestGenerateSpiloJSONConfiguration(t *testing.T) { FailsafeMode: util.True(), }, opConfig: &config.Config{}, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin","pg_hba":["hostssl all all 0.0.0.0/0 md5","host all all 0.0.0.0/0 md5"]},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"},"data-checksums",{"encoding":"UTF8"},{"locale":"en_US.UTF-8"}],"dcs":{"ttl":30,"loop_wait":10,"retry_timeout":10,"maximum_lag_on_failover":33554432,"synchronous_mode":true,"synchronous_mode_strict":true,"synchronous_node_count":1,"slots":{"permanent_logical_1":{"database":"foo","plugin":"pgoutput","type":"logical"}},"failsafe_mode":true}}}`, }, { subtest: "Patroni failsafe_mode configured globally", - pgParam: &acidv1.PostgresqlParam{PgVersion: "16"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{}, opConfig: &config.Config{ EnablePatroniFailsafeMode: util.True(), }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, }, { subtest: "Patroni failsafe_mode configured globally, disabled for cluster", - pgParam: 
&acidv1.PostgresqlParam{PgVersion: "16"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{ FailsafeMode: util.False(), }, opConfig: &config.Config{ EnablePatroniFailsafeMode: util.True(), }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":false}}}`, }, { subtest: "Patroni failsafe_mode disabled globally, configured for cluster", - pgParam: &acidv1.PostgresqlParam{PgVersion: "16"}, + pgParam: &acidv1.PostgresqlParam{PgVersion: "17"}, patroni: &acidv1.Patroni{ FailsafeMode: util.True(), }, opConfig: &config.Config{ EnablePatroniFailsafeMode: util.False(), }, - result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/16/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, + result: `{"postgresql":{"bin_dir":"/usr/lib/postgresql/17/bin"},"bootstrap":{"initdb":[{"auth-host":"md5"},{"auth-local":"trust"}],"dcs":{"failsafe_mode":true}}}`, }, } for _, tt := range tests { @@ -164,15 +164,15 @@ func TestExtractPgVersionFromBinPath(t *testing.T) { }, { subTest: "test current bin path against hard coded template", - binPath: "/usr/lib/postgresql/16/bin", + binPath: "/usr/lib/postgresql/17/bin", template: pgBinariesLocationTemplate, - expected: "16", + expected: "17", }, { subTest: "test alternative bin path against a matching template", - binPath: "/usr/pgsql-16/bin", + binPath: "/usr/pgsql-17/bin", template: "/usr/pgsql-%v/bin", - expected: "16", + expected: "17", }, } @@ -2148,7 +2148,7 @@ func TestSidecars(t *testing.T) { spec = acidv1.PostgresSpec{ PostgresqlParam: acidv1.PostgresqlParam{ - PgVersion: "16", + PgVersion: "17", Parameters: map[string]string{ "max_connections": "100", }, diff --git a/pkg/cluster/majorversionupgrade.go b/pkg/cluster/majorversionupgrade.go index 560f8977f..a4ae5f81b 100644 --- a/pkg/cluster/majorversionupgrade.go +++ b/pkg/cluster/majorversionupgrade.go @@ -21,6 +21,7 @@ var VersionMap = map[string]int{ "14": 140000, "15": 150000, "16": 160000, + "17": 170000, } const ( @@ -44,7 +45,7 @@ func (c *Cluster) GetDesiredMajorVersionAsInt() int { func (c *Cluster) GetDesiredMajorVersion() string { if c.Config.OpConfig.MajorVersionUpgradeMode == "full" { - // e.g. current is 12, minimal is 12 allowing 12 to 16 clusters, everything below is upgraded + // e.g. 
current is 13, minimal is 13 allowing 13 to 17 clusters, everything below is upgraded if IsBiggerPostgresVersion(c.Spec.PgVersion, c.Config.OpConfig.MinimalMajorVersion) { c.logger.Infof("overwriting configured major version %s to %s", c.Spec.PgVersion, c.Config.OpConfig.TargetMajorVersion) return c.Config.OpConfig.TargetMajorVersion diff --git a/pkg/cluster/streams.go b/pkg/cluster/streams.go index 3d9cbae11..9e2c7482a 100644 --- a/pkg/cluster/streams.go +++ b/pkg/cluster/streams.go @@ -178,16 +187,25 @@ func (c *Cluster) syncPublication(dbName string, databaseSlotsList map[string]za func (c *Cluster) generateFabricEventStream(appId string) *zalandov1.FabricEventStream { eventStreams := make([]zalandov1.EventStream, 0) + resourceAnnotations := map[string]string{} + var err, err2 error for _, stream := range c.Spec.Streams { if stream.ApplicationId != appId { continue } + + err = setResourceAnnotation(&resourceAnnotations, stream.CPU, constants.EventStreamCpuAnnotationKey) + err2 = setResourceAnnotation(&resourceAnnotations, stream.Memory, constants.EventStreamMemoryAnnotationKey) + if err != nil || err2 != nil { + c.logger.Warningf("could not set resource annotations for event stream: %v, %v", err, err2) + } + for tableName, table := range stream.Tables { streamSource := c.getEventStreamSource(stream, tableName, table.IdColumn) streamFlow := getEventStreamFlow(table.PayloadColumn) streamSink := getEventStreamSink(stream, table.EventType) - streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType) + streamRecovery := getEventStreamRecovery(stream, table.RecoveryEventType, table.EventType, table.IgnoreRecovery) eventStreams = append(eventStreams, zalandov1.EventStream{ EventStreamFlow: streamFlow, @@ -207,7 +216,7 @@ Name: fmt.Sprintf("%s-%s", c.Name, strings.ToLower(util.RandomPassword(5))), Namespace: c.Namespace, Labels: c.labelsSet(true), - Annotations: c.AnnotationsToPropagate(c.annotationsSet(nil)), + Annotations: c.AnnotationsToPropagate(c.annotationsSet(resourceAnnotations)), OwnerReferences: c.ownerReferences(), }, Spec: zalandov1.FabricEventStreamSpec{ @@ -217,6 +226,27 @@ } } +func setResourceAnnotation(annotations *map[string]string, resource *string, key string) error { + var ( + isSmaller bool + err error + ) + if resource != nil { + currentValue, exists := (*annotations)[key] + if exists { + isSmaller, err = util.IsSmallerQuantity(currentValue, *resource) + if err != nil { + return fmt.Errorf("could not compare resource in %q annotation: %v", key, err) + } + } + if isSmaller || !exists { + (*annotations)[key] = *resource + } + } + + return nil +} + func (c *Cluster) getEventStreamSource(stream acidv1.Stream, tableName string, idColumn *string) zalandov1.EventStreamSource { table, schema := getTableSchema(tableName) streamFilter := stream.Filter[tableName] @@ -247,7 +277,7 @@ } } -func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string) zalandov1.EventStreamRecovery { +func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType string, ignoreRecovery *bool) zalandov1.EventStreamRecovery { if (stream.EnableRecovery != nil && !*stream.EnableRecovery) || (stream.EnableRecovery == nil && recoveryEventType == "") { return zalandov1.EventStreamRecovery{ @@ -255,6 +285,12 @@
func getEventStreamRecovery(stream acidv1.Stream, recoveryEventType, eventType s } } + if ignoreRecovery != nil && *ignoreRecovery { + return zalandov1.EventStreamRecovery{ + Type: constants.EventStreamRecoveryIgnoreType, + } + } + if stream.EnableRecovery != nil && *stream.EnableRecovery && recoveryEventType == "" { recoveryEventType = fmt.Sprintf("%s-%s", eventType, constants.EventStreamRecoverySuffix) } @@ -442,7 +478,9 @@ func (c *Cluster) syncStream(appId string) error { c.setProcessName("syncing stream with applicationId %s", appId) c.logger.Debugf("syncing stream with applicationId %s", appId) - listOptions := metav1.ListOptions{LabelSelector: c.labelsSet(true).String()} + listOptions := metav1.ListOptions{ + LabelSelector: c.labelsSet(false).String(), + } streams, err = c.KubeClient.FabricEventStreams(c.Namespace).List(context.TODO(), listOptions) if err != nil { return fmt.Errorf("could not list of FabricEventStreams for applicationId %s: %v", appId, err) @@ -453,15 +491,6 @@ func (c *Cluster) syncStream(appId string) error { if stream.Spec.ApplicationId != appId { continue } - if streamExists { - c.logger.Warningf("more than one event stream with applicationId %s found, delete it", appId) - if err = c.KubeClient.FabricEventStreams(stream.ObjectMeta.Namespace).Delete(context.TODO(), stream.ObjectMeta.Name, metav1.DeleteOptions{}); err != nil { - c.logger.Errorf("could not delete event stream %q with applicationId %s: %v", stream.ObjectMeta.Name, appId, err) - } else { - c.logger.Infof("redundant event stream %q with applicationId %s has been successfully deleted", stream.ObjectMeta.Name, appId) - } - continue - } streamExists = true desiredStreams := c.generateFabricEventStream(appId) if !reflect.DeepEqual(stream.ObjectMeta.OwnerReferences, desiredStreams.ObjectMeta.OwnerReferences) { @@ -476,7 +505,8 @@ func (c *Cluster) syncStream(appId string) error { } if match, reason := c.compareStreams(&stream, desiredStreams); !match { c.logger.Infof("updating event streams with applicationId %s: %s", appId, reason) - desiredStreams.ObjectMeta = stream.ObjectMeta + // make sure to keep the old name with randomly generated suffix + desiredStreams.ObjectMeta.Name = stream.ObjectMeta.Name updatedStream, err := c.updateStreams(desiredStreams) if err != nil { return fmt.Errorf("failed updating event streams %s with applicationId %s: %v", stream.Name, appId, err) @@ -484,6 +514,7 @@ func (c *Cluster) syncStream(appId string) error { c.Streams[appId] = updatedStream c.logger.Infof("event streams %q with applicationId %s have been successfully updated", updatedStream.Name, appId) } + break } if !streamExists { @@ -501,15 +532,29 @@ func (c *Cluster) syncStream(appId string) error { func (c *Cluster) compareStreams(curEventStreams, newEventStreams *zalandov1.FabricEventStream) (match bool, reason string) { reasons := make([]string, 0) + desiredAnnotations := make(map[string]string) match = true // stream operator can add extra annotations so incl. 
current annotations in desired annotations - desiredAnnotations := c.annotationsSet(curEventStreams.Annotations) + for curKey, curValue := range curEventStreams.Annotations { + if _, exists := desiredAnnotations[curKey]; !exists { + desiredAnnotations[curKey] = curValue + } + } + // add/or override annotations if cpu and memory values were changed + for newKey, newValue := range newEventStreams.Annotations { + desiredAnnotations[newKey] = newValue + } if changed, reason := c.compareAnnotations(curEventStreams.ObjectMeta.Annotations, desiredAnnotations); changed { match = false reasons = append(reasons, fmt.Sprintf("new streams annotations do not match: %s", reason)) } + if !reflect.DeepEqual(curEventStreams.ObjectMeta.Labels, newEventStreams.ObjectMeta.Labels) { + match = false + reasons = append(reasons, "new streams labels do not match the current ones") + } + if changed, reason := sameEventStreams(curEventStreams.Spec.EventStreams, newEventStreams.Spec.EventStreams); !changed { match = false reasons = append(reasons, fmt.Sprintf("new streams EventStreams array does not match : %s", reason)) diff --git a/pkg/cluster/streams_test.go b/pkg/cluster/streams_test.go index 92d28663e..934f2bfd4 100644 --- a/pkg/cluster/streams_test.go +++ b/pkg/cluster/streams_test.go @@ -65,12 +65,17 @@ var ( EventType: "stream-type-b", RecoveryEventType: "stream-type-b-dlq", }, + "data.foofoobar": { + EventType: "stream-type-c", + IgnoreRecovery: util.True(), + }, }, EnableRecovery: util.True(), Filter: map[string]*string{ "data.bar": k8sutil.StringToPointer("[?(@.source.txId > 500 && @.source.lsn > 123456)]"), }, BatchSize: k8sutil.UInt32ToPointer(uint32(100)), + CPU: k8sutil.StringToPointer("250m"), }, }, TeamID: "acid", @@ -88,9 +93,12 @@ var ( ObjectMeta: metav1.ObjectMeta{ Name: fmt.Sprintf("%s-12345", clusterName), Namespace: namespace, + Annotations: map[string]string{ + constants.EventStreamCpuAnnotationKey: "250m", + }, Labels: map[string]string{ "application": "spilo", - "cluster-name": fmt.Sprintf("%s-2", clusterName), + "cluster-name": clusterName, "team": "acid", }, OwnerReferences: []metav1.OwnerReference{ @@ -180,6 +188,37 @@ var ( Type: constants.EventStreamSourcePGType, }, }, + { + EventStreamFlow: zalandov1.EventStreamFlow{ + Type: constants.EventStreamFlowPgGenericType, + }, + EventStreamRecovery: zalandov1.EventStreamRecovery{ + Type: constants.EventStreamRecoveryIgnoreType, + }, + EventStreamSink: zalandov1.EventStreamSink{ + EventType: "stream-type-c", + MaxBatchSize: k8sutil.UInt32ToPointer(uint32(100)), + Type: constants.EventStreamSinkNakadiType, + }, + EventStreamSource: zalandov1.EventStreamSource{ + Connection: zalandov1.Connection{ + DBAuth: zalandov1.DBAuth{ + Name: fmt.Sprintf("fes-user.%s.credentials.postgresql.acid.zalan.do", clusterName), + PasswordKey: "password", + Type: constants.EventStreamSourceAuthType, + UserKey: "username", + }, + Url: fmt.Sprintf("jdbc:postgresql://%s.%s/foo?user=%s&ssl=true&sslmode=require", clusterName, namespace, fesUser), + SlotName: slotName, + PluginType: constants.EventStreamSourcePluginType, + }, + Schema: "data", + EventStreamTable: zalandov1.EventStreamTable{ + Name: "foofoobar", + }, + Type: constants.EventStreamSourcePGType, + }, + }, }, }, } @@ -449,7 +488,7 @@ func TestGenerateFabricEventStream(t *testing.T) { } listOptions := metav1.ListOptions{ - LabelSelector: cluster.labelsSet(true).String(), + LabelSelector: cluster.labelsSet(false).String(), } streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), 
listOptions) assert.NoError(t, err) @@ -488,20 +527,20 @@ func newFabricEventStream(streams []zalandov1.EventStream, annotations map[strin } func TestSyncStreams(t *testing.T) { - pg.Name = fmt.Sprintf("%s-2", pg.Name) + newClusterName := fmt.Sprintf("%s-2", pg.Name) + pg.Name = newClusterName var cluster = New( Config{ OpConfig: config.Config{ PodManagementPolicy: "ordered_ready", Resources: config.Resources{ - ClusterLabels: map[string]string{"application": "spilo"}, - ClusterNameLabel: "cluster-name", - DefaultCPURequest: "300m", - DefaultCPULimit: "300m", - DefaultMemoryRequest: "300Mi", - DefaultMemoryLimit: "300Mi", - EnableOwnerReferences: util.True(), - PodRoleLabel: "spilo-role", + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + PodRoleLabel: "spilo-role", }, }, }, client, pg, logger, eventRecorder) @@ -514,39 +553,23 @@ func TestSyncStreams(t *testing.T) { err = cluster.syncStream(appId) assert.NoError(t, err) - // create a second stream with same spec but with different name - createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create( - context.TODO(), fes, metav1.CreateOptions{}) - assert.NoError(t, err) - assert.Equal(t, createdStream.Spec.ApplicationId, appId) - - // check that two streams exist - listOptions := metav1.ListOptions{ - LabelSelector: cluster.labelsSet(true).String(), - } - streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) - assert.NoError(t, err) - assert.Equalf(t, 2, len(streams.Items), "unexpected number of streams found: got %d, but expected only 2", len(streams.Items)) - - // sync the stream which should remove the redundant stream + // sync the stream again err = cluster.syncStream(appId) assert.NoError(t, err) // check that only one stream remains after sync - streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + listOptions := metav1.ListOptions{ + LabelSelector: cluster.labelsSet(false).String(), + } + streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) assert.NoError(t, err) assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items)) - - // check owner references - if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) { - t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences) - } } func TestSameStreams(t *testing.T) { testName := "TestSameStreams" - annotationsA := map[string]string{"owned-by": "acid"} - annotationsB := map[string]string{"owned-by": "foo"} + annotationsA := map[string]string{constants.EventStreamMemoryAnnotationKey: "500Mi"} + annotationsB := map[string]string{constants.EventStreamMemoryAnnotationKey: "1Gi"} stream1 := zalandov1.EventStream{ EventStreamFlow: zalandov1.EventStreamFlow{}, @@ -615,42 +638,49 @@ func TestSameStreams(t *testing.T) { streamsA: newFabricEventStream([]zalandov1.EventStream{stream1}, nil), streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), match: false, - reason: "number of defined streams is different", + reason: "new streams EventStreams array does not match : number of defined streams is different", }, { subTest: "different number of streams", streamsA: 
newFabricEventStream([]zalandov1.EventStream{stream1}, nil), streamsB: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), match: false, - reason: "number of defined streams is different", + reason: "new streams EventStreams array does not match : number of defined streams is different", }, { subTest: "event stream specs differ", streamsA: newFabricEventStream([]zalandov1.EventStream{stream1, stream2}, nil), streamsB: fes, match: false, - reason: "number of defined streams is different", + reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_CPU\" with value \"250m\"., new streams labels do not match the current ones, new streams EventStreams array does not match : number of defined streams is different", }, { subTest: "event stream recovery specs differ", streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil), streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, nil), match: false, - reason: "event stream specs differ", + reason: "new streams EventStreams array does not match : event stream specs differ", + }, + { + subTest: "event stream with new annotations", + streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, nil), + streamsB: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA), + match: false, + reason: "new streams annotations do not match: Added \"fes.zalando.org/FES_MEMORY\" with value \"500Mi\".", }, { subTest: "event stream annotations differ", - streamsA: newFabricEventStream([]zalandov1.EventStream{stream2}, annotationsA), + streamsA: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsA), streamsB: newFabricEventStream([]zalandov1.EventStream{stream3}, annotationsB), match: false, - reason: "event stream specs differ", + reason: "new streams annotations do not match: \"fes.zalando.org/FES_MEMORY\" changed from \"500Mi\" to \"1Gi\".", }, } for _, tt := range tests { streamsMatch, matchReason := cluster.compareStreams(tt.streamsA, tt.streamsB) - if streamsMatch != tt.match { - t.Errorf("%s %s: unexpected match result when comparing streams: got %s, epxected %s", + if streamsMatch != tt.match || matchReason != tt.reason { + t.Errorf("%s %s: unexpected match result when comparing streams: got %s, expected %s", testName, tt.subTest, matchReason, tt.reason) } } @@ -658,6 +688,105 @@ func TestSameStreams(t *testing.T) { func TestUpdateStreams(t *testing.T) { pg.Name = fmt.Sprintf("%s-3", pg.Name) + var cluster = New( + Config{ + OpConfig: config.Config{ + PodManagementPolicy: "ordered_ready", + Resources: config.Resources{ + ClusterLabels: map[string]string{"application": "spilo"}, + ClusterNameLabel: "cluster-name", + DefaultCPURequest: "300m", + DefaultCPULimit: "300m", + DefaultMemoryRequest: "300Mi", + DefaultMemoryLimit: "300Mi", + EnableOwnerReferences: util.True(), + PodRoleLabel: "spilo-role", + }, + }, + }, client, pg, logger, eventRecorder) + + _, err := cluster.KubeClient.Postgresqls(namespace).Create( + context.TODO(), &pg, metav1.CreateOptions{}) + assert.NoError(t, err) + + // create stream with different owner reference + fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name) + fes.ObjectMeta.Labels["cluster-name"] = pg.Name + createdStream, err := cluster.KubeClient.FabricEventStreams(namespace).Create( + context.TODO(), fes, metav1.CreateOptions{}) + assert.NoError(t, err) + assert.Equal(t, createdStream.Spec.ApplicationId, appId) + + // sync the stream which should update the owner reference + err = cluster.syncStream(appId) + assert.NoError(t, err) + + 
// check that only one stream exists after sync + listOptions := metav1.ListOptions{ + LabelSelector: cluster.labelsSet(true).String(), + } + streams, err := cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + assert.Equalf(t, 1, len(streams.Items), "unexpected number of streams found: got %d, but expected only 1", len(streams.Items)) + + // compare owner references + if !reflect.DeepEqual(streams.Items[0].OwnerReferences, cluster.ownerReferences()) { + t.Errorf("unexpected owner references, expected %#v, got %#v", cluster.ownerReferences(), streams.Items[0].OwnerReferences) + } + + // change specs of streams and patch CRD + for i, stream := range pg.Spec.Streams { + if stream.ApplicationId == appId { + streamTable := stream.Tables["data.bar"] + streamTable.EventType = "stream-type-c" + stream.Tables["data.bar"] = streamTable + stream.BatchSize = k8sutil.UInt32ToPointer(uint32(250)) + pg.Spec.Streams[i] = stream + } + } + + // compare stream returned from API with expected stream + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result := cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) + } + + // disable recovery + for idx, stream := range pg.Spec.Streams { + if stream.ApplicationId == appId { + stream.EnableRecovery = util.False() + pg.Spec.Streams[idx] = stream + } + } + + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result = cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) + } +} + +func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) { + patchData, err := specPatch(pgSpec) + assert.NoError(t, err) + + pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch( + context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec") + assert.NoError(t, err) + + cluster.Postgresql.Spec = pgPatched.Spec + err = cluster.syncStream(appId) + assert.NoError(t, err) + + streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) + assert.NoError(t, err) + + return streams +} + +func TestDeleteStreams(t *testing.T) { + pg.Name = fmt.Sprintf("%s-4", pg.Name) var cluster = New( Config{ OpConfig: config.Config{ @@ -695,7 +824,7 @@ func TestUpdateStreams(t *testing.T) { // compare stream returned from API with expected stream listOptions := metav1.ListOptions{ - LabelSelector: cluster.labelsSet(true).String(), + LabelSelector: cluster.labelsSet(false).String(), } streams := patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) result := cluster.generateFabricEventStream(appId) @@ -703,6 +832,14 @@ func TestUpdateStreams(t *testing.T) { t.Errorf("Malformed FabricEventStream after updating manifest, expected %#v, got %#v", streams.Items[0], result) } + // change teamId and check that stream is updated + pg.Spec.TeamID = "new-team" + streams = patchPostgresqlStreams(t, cluster, &pg.Spec, listOptions) + result = cluster.generateFabricEventStream(appId) + if match, _ := cluster.compareStreams(&streams.Items[0], result); !match { + t.Errorf("Malformed 
FabricEventStream after updating teamId, expected %#v, got %#v", streams.Items[0].ObjectMeta.Labels, result.ObjectMeta.Labels) + } + // disable recovery for idx, stream := range pg.Spec.Streams { if stream.ApplicationId == appId { @@ -717,9 +854,6 @@ t.Errorf("Malformed FabricEventStream after disabling event recovery, expected %#v, got %#v", streams.Items[0], result) } - mockClient := k8sutil.NewMockKubernetesClient() - cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter - // remove streams from manifest pg.Spec.Streams = nil pgUpdated, err := cluster.KubeClient.Postgresqls(namespace).Update( @@ -729,26 +863,29 @@ appIds := getDistinctApplicationIds(pgUpdated.Spec.Streams) cluster.cleanupRemovedStreams(appIds) + // check that streams have been deleted streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) - if len(streams.Items) > 0 || err != nil { - t.Errorf("stream resource has not been removed or unexpected error %v", err) - } -} + assert.NoError(t, err) + assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items)) -func patchPostgresqlStreams(t *testing.T, cluster *Cluster, pgSpec *acidv1.PostgresSpec, listOptions metav1.ListOptions) (streams *zalandov1.FabricEventStreamList) { - patchData, err := specPatch(pgSpec) + // create stream to test deleteStreams code + fes.ObjectMeta.Name = fmt.Sprintf("%s-12345", pg.Name) + fes.ObjectMeta.Labels["cluster-name"] = pg.Name + _, err = cluster.KubeClient.FabricEventStreams(namespace).Create( + context.TODO(), fes, metav1.CreateOptions{}) assert.NoError(t, err) - pgPatched, err := cluster.KubeClient.Postgresqls(namespace).Patch( - context.TODO(), cluster.Name, types.MergePatchType, patchData, metav1.PatchOptions{}, "spec") - assert.NoError(t, err) - - cluster.Postgresql.Spec = pgPatched.Spec + // sync it once to cluster struct err = cluster.syncStream(appId) assert.NoError(t, err) + // we need a mock client because deleteStreams checks for CRD existence + mockClient := k8sutil.NewMockKubernetesClient() + cluster.KubeClient.CustomResourceDefinitionsGetter = mockClient.CustomResourceDefinitionsGetter + cluster.deleteStreams() + + // check that streams have been deleted streams, err = cluster.KubeClient.FabricEventStreams(namespace).List(context.TODO(), listOptions) assert.NoError(t, err) - - return streams + assert.Equalf(t, 0, len(streams.Items), "unexpected number of streams found: got %d, but expected none", len(streams.Items)) } diff --git a/pkg/cluster/volumes.go b/pkg/cluster/volumes.go index 165c6c7a3..240220ccf 100644 --- a/pkg/cluster/volumes.go +++ b/pkg/cluster/volumes.go @@ -151,7 +151,7 @@ func (c *Cluster) populateVolumeMetaData() error { volumeIds := []string{} var volumeID string for _, pv := range pvs { - volumeID, err = c.VolumeResizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + volumeID, err = c.VolumeResizer.GetProviderVolumeID(pv) if err != nil { continue } diff --git a/pkg/cluster/volumes_test.go b/pkg/cluster/volumes_test.go index 99780982f..95ecc7624 100644 --- a/pkg/cluster/volumes_test.go +++ b/pkg/cluster/volumes_test.go @@ -216,6 +216,12 @@ func TestMigrateEBS(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil)
resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(2) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 100}, @@ -322,6 +328,12 @@ func TestMigrateGp3Support(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-3")).Return("ebs-volume-3", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(3) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2", "ebs-volume-3"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp3", Size: 100, Iops: 3000}, @@ -377,6 +389,12 @@ func TestManualGp2Gp3Support(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). + Times(2) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, @@ -436,6 +454,12 @@ func TestDontTouchType(t *testing.T) { resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-1")).Return("ebs-volume-1", nil) resizer.EXPECT().ExtractVolumeID(gomock.Eq("aws://eu-central-1b/ebs-volume-2")).Return("ebs-volume-2", nil) + resizer.EXPECT().GetProviderVolumeID(gomock.Any()). + DoAndReturn(func(pv *v1.PersistentVolume) (string, error) { + return resizer.ExtractVolumeID(pv.Spec.AWSElasticBlockStore.VolumeID) + }). 
+ Times(2) + resizer.EXPECT().DescribeVolumes(gomock.Eq([]string{"ebs-volume-1", "ebs-volume-2"})).Return( []volumes.VolumeProperties{ {VolumeID: "ebs-volume-1", VolumeType: "gp2", Size: 150, Iops: 3000}, diff --git a/pkg/controller/operator_config.go b/pkg/controller/operator_config.go index b77036da9..3d8bc5670 100644 --- a/pkg/controller/operator_config.go +++ b/pkg/controller/operator_config.go @@ -39,7 +39,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur result.EnableTeamIdClusternamePrefix = fromCRD.EnableTeamIdClusternamePrefix result.EtcdHost = fromCRD.EtcdHost result.KubernetesUseConfigMaps = fromCRD.KubernetesUseConfigMaps - result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-16:3.3-p1") + result.DockerImage = util.Coalesce(fromCRD.DockerImage, "ghcr.io/zalando/spilo-17:4.0-p2") result.Workers = util.CoalesceUInt32(fromCRD.Workers, 8) result.MinInstances = fromCRD.MinInstances result.MaxInstances = fromCRD.MaxInstances @@ -62,8 +62,8 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur // major version upgrade config result.MajorVersionUpgradeMode = util.Coalesce(fromCRD.MajorVersionUpgrade.MajorVersionUpgradeMode, "manual") result.MajorVersionUpgradeTeamAllowList = fromCRD.MajorVersionUpgrade.MajorVersionUpgradeTeamAllowList - result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "12") - result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "16") + result.MinimalMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.MinimalMajorVersion, "13") + result.TargetMajorVersion = util.Coalesce(fromCRD.MajorVersionUpgrade.TargetMajorVersion, "17") // kubernetes config result.EnableOwnerReferences = util.CoalesceBool(fromCRD.Kubernetes.EnableOwnerReferences, util.False()) @@ -181,7 +181,7 @@ func (c *Controller) importConfigurationFromCRD(fromCRD *acidv1.OperatorConfigur // logical backup config result.LogicalBackupSchedule = util.Coalesce(fromCRD.LogicalBackup.Schedule, "30 00 * * *") - result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0") + result.LogicalBackupDockerImage = util.Coalesce(fromCRD.LogicalBackup.DockerImage, "ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0") result.LogicalBackupProvider = util.Coalesce(fromCRD.LogicalBackup.BackupProvider, "s3") result.LogicalBackupAzureStorageAccountName = fromCRD.LogicalBackup.AzureStorageAccountName result.LogicalBackupAzureStorageAccountKey = fromCRD.LogicalBackup.AzureStorageAccountKey diff --git a/pkg/spec/types.go b/pkg/spec/types.go index cfa293e14..d727aee42 100644 --- a/pkg/spec/types.go +++ b/pkg/spec/types.go @@ -122,6 +122,9 @@ type ControllerConfig struct { IgnoredAnnotations []string EnableJsonLogging bool + + KubeQPS int + KubeBurst int } // cached value for the GetOperatorNamespace diff --git a/pkg/util/config/config.go b/pkg/util/config/config.go index 09a5a9a93..0bd04bb53 100644 --- a/pkg/util/config/config.go +++ b/pkg/util/config/config.go @@ -128,7 +128,7 @@ type Scalyr struct { // LogicalBackup defines configuration for logical backup type LogicalBackup struct { LogicalBackupSchedule string `name:"logical_backup_schedule" default:"30 00 * * *"` - LogicalBackupDockerImage string `name:"logical_backup_docker_image" default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.13.0"` + LogicalBackupDockerImage string `name:"logical_backup_docker_image" 
default:"ghcr.io/zalando/postgres-operator/logical-backup:v1.14.0"` LogicalBackupProvider string `name:"logical_backup_provider" default:"s3"` LogicalBackupAzureStorageAccountName string `name:"logical_backup_azure_storage_account_name" default:""` LogicalBackupAzureStorageContainer string `name:"logical_backup_azure_storage_container" default:""` @@ -176,7 +176,7 @@ type Config struct { WatchedNamespace string `name:"watched_namespace"` // special values: "*" means 'watch all namespaces', the empty string "" means 'watch a namespace where operator is deployed to' KubernetesUseConfigMaps bool `name:"kubernetes_use_configmaps" default:"false"` EtcdHost string `name:"etcd_host" default:""` // special values: the empty string "" means Patroni will use K8s as a DCS - DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-16:3.3-p1"` + DockerImage string `name:"docker_image" default:"ghcr.io/zalando/spilo-17:4.0-p2"` SidecarImages map[string]string `name:"sidecar_docker_images"` // deprecated in favour of SidecarContainers SidecarContainers []v1.Container `name:"sidecars"` PodServiceAccountName string `name:"pod_service_account_name" default:"postgres-pod"` @@ -247,8 +247,8 @@ type Config struct { EnableTeamIdClusternamePrefix bool `name:"enable_team_id_clustername_prefix" default:"false"` MajorVersionUpgradeMode string `name:"major_version_upgrade_mode" default:"manual"` MajorVersionUpgradeTeamAllowList []string `name:"major_version_upgrade_team_allow_list" default:""` - MinimalMajorVersion string `name:"minimal_major_version" default:"12"` - TargetMajorVersion string `name:"target_major_version" default:"16"` + MinimalMajorVersion string `name:"minimal_major_version" default:"13"` + TargetMajorVersion string `name:"target_major_version" default:"17"` PatroniAPICheckInterval time.Duration `name:"patroni_api_check_interval" default:"1s"` PatroniAPICheckTimeout time.Duration `name:"patroni_api_check_timeout" default:"5s"` EnablePatroniFailsafeMode *bool `name:"enable_patroni_failsafe_mode" default:"false"` diff --git a/pkg/util/constants/aws.go b/pkg/util/constants/aws.go index f1cfd5975..147e58889 100644 --- a/pkg/util/constants/aws.go +++ b/pkg/util/constants/aws.go @@ -7,6 +7,7 @@ const ( // EBS related constants EBSVolumeIDStart = "/vol-" EBSProvisioner = "kubernetes.io/aws-ebs" + EBSDriver = "ebs.csi.aws.com" //https://docs.aws.amazon.com/AWSEC2/latest/APIReference/API_VolumeModification.html EBSVolumeStateModifying = "modifying" EBSVolumeStateOptimizing = "optimizing" diff --git a/pkg/util/constants/streams.go b/pkg/util/constants/streams.go index 8916701f3..cb4bb6a3f 100644 --- a/pkg/util/constants/streams.go +++ b/pkg/util/constants/streams.go @@ -2,16 +2,19 @@ package constants // PostgreSQL specific constants const ( - EventStreamCRDApiVersion = "zalando.org/v1" - EventStreamCRDKind = "FabricEventStream" - EventStreamCRDName = "fabriceventstreams.zalando.org" - EventStreamSourcePGType = "PostgresLogicalReplication" - EventStreamSourceSlotPrefix = "fes" - EventStreamSourcePluginType = "pgoutput" - EventStreamSourceAuthType = "DatabaseAuthenticationSecret" - EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent" - EventStreamSinkNakadiType = "Nakadi" - EventStreamRecoveryNoneType = "None" - EventStreamRecoveryDLQType = "DeadLetter" - EventStreamRecoverySuffix = "dead-letter-queue" + EventStreamCRDApiVersion = "zalando.org/v1" + EventStreamCRDKind = "FabricEventStream" + EventStreamCRDName = "fabriceventstreams.zalando.org" + EventStreamSourcePGType = 
"PostgresLogicalReplication" + EventStreamSourceSlotPrefix = "fes" + EventStreamSourcePluginType = "pgoutput" + EventStreamSourceAuthType = "DatabaseAuthenticationSecret" + EventStreamFlowPgGenericType = "PostgresWalToGenericNakadiEvent" + EventStreamSinkNakadiType = "Nakadi" + EventStreamRecoveryDLQType = "DeadLetter" + EventStreamRecoveryIgnoreType = "Ignore" + EventStreamRecoveryNoneType = "None" + EventStreamRecoverySuffix = "dead-letter-queue" + EventStreamCpuAnnotationKey = "fes.zalando.org/FES_CPU" + EventStreamMemoryAnnotationKey = "fes.zalando.org/FES_MEMORY" ) diff --git a/pkg/util/volumes/ebs.go b/pkg/util/volumes/ebs.go index f625dab2f..cb8f8e97f 100644 --- a/pkg/util/volumes/ebs.go +++ b/pkg/util/volumes/ebs.go @@ -36,7 +36,8 @@ func (r *EBSVolumeResizer) IsConnectedToProvider() bool { // VolumeBelongsToProvider checks if the given persistent volume is backed by EBS. func (r *EBSVolumeResizer) VolumeBelongsToProvider(pv *v1.PersistentVolume) bool { - return pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner + return (pv.Spec.AWSElasticBlockStore != nil && pv.Annotations[constants.VolumeStorateProvisionerAnnotation] == constants.EBSProvisioner) || + (pv.Spec.CSI != nil && pv.Spec.CSI.Driver == constants.EBSDriver) } // ExtractVolumeID extracts volumeID from "aws://eu-central-1a/vol-075ddfc4a127d0bd4" @@ -54,7 +55,12 @@ func (r *EBSVolumeResizer) ExtractVolumeID(volumeID string) (string, error) { // GetProviderVolumeID converts aws://eu-central-1b/vol-00f93d4827217c629 to vol-00f93d4827217c629 for EBS volumes func (r *EBSVolumeResizer) GetProviderVolumeID(pv *v1.PersistentVolume) (string, error) { - volumeID := pv.Spec.AWSElasticBlockStore.VolumeID + var volumeID string = "" + if pv.Spec.CSI != nil { + volumeID = pv.Spec.CSI.VolumeHandle + } else if pv.Spec.AWSElasticBlockStore != nil { + volumeID = pv.Spec.AWSElasticBlockStore.VolumeID + } if volumeID == "" { return "", fmt.Errorf("got empty volume id for volume %v", pv) } diff --git a/pkg/util/volumes/ebs_test.go b/pkg/util/volumes/ebs_test.go new file mode 100644 index 000000000..6f722ff7b --- /dev/null +++ b/pkg/util/volumes/ebs_test.go @@ -0,0 +1,123 @@ +package volumes + +import ( + "fmt" + "testing" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +func TestGetProviderVolumeID(t *testing.T) { + tests := []struct { + name string + pv *v1.PersistentVolume + expected string + err error + }{ + { + name: "CSI volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + VolumeHandle: "vol-075ddfc4a127d0bd5", + }, + }, + }, + }, + expected: "vol-075ddfc4a127d0bd5", + err: nil, + }, + { + name: "AWS EBS volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4", + }, + }, + }, + }, + expected: "vol-075ddfc4a127d0bd4", + err: nil, + }, + { + name: "Empty volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + }, + expected: "", + err: fmt.Errorf("got empty volume id for volume %v", &v1.PersistentVolume{}), + }, + } + + resizer := EBSVolumeResizer{} + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + volumeID, err := resizer.GetProviderVolumeID(tt.pv) + if volumeID != tt.expected || (err != 
nil && err.Error() != tt.err.Error()) { + t.Errorf("expected %v, got %v, expected err %v, got %v", tt.expected, volumeID, tt.err, err) + } + }) + } +} + +func TestVolumeBelongsToProvider(t *testing.T) { + tests := []struct { + name string + pv *v1.PersistentVolume + expected bool + }{ + { + name: "CSI volume handle", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + CSI: &v1.CSIPersistentVolumeSource{ + Driver: "ebs.csi.aws.com", + VolumeHandle: "vol-075ddfc4a127d0bd5", + }, + }, + }, + }, + expected: true, + }, + { + name: "AWS EBS volume handle", + pv: &v1.PersistentVolume{ + ObjectMeta: metav1.ObjectMeta{ + Annotations: map[string]string { + "pv.kubernetes.io/provisioned-by": "kubernetes.io/aws-ebs", + }, + }, + Spec: v1.PersistentVolumeSpec{ + PersistentVolumeSource: v1.PersistentVolumeSource{ + AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{ + VolumeID: "aws://eu-central-1a/vol-075ddfc4a127d0bd4", + }, + }, + }, + }, + expected: true, + }, + { + name: "Empty volume source", + pv: &v1.PersistentVolume{ + Spec: v1.PersistentVolumeSpec{}, + }, + expected: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + resizer := EBSVolumeResizer{} + isProvider := resizer.VolumeBelongsToProvider(tt.pv) + if isProvider != tt.expected { + t.Errorf("expected %v, got %v", tt.expected, isProvider) + } + }) + } +} diff --git a/ui/app/package.json b/ui/app/package.json index e96ee77dc..ef24834ca 100644 --- a/ui/app/package.json +++ b/ui/app/package.json @@ -1,6 +1,6 @@ { "name": "postgres-operator-ui", - "version": "1.13.0", + "version": "1.14.0", "description": "PostgreSQL Operator UI", "main": "src/app.js", "config": { diff --git a/ui/manifests/deployment.yaml b/ui/manifests/deployment.yaml index 76d2143cb..e09dd1e4f 100644 --- a/ui/manifests/deployment.yaml +++ b/ui/manifests/deployment.yaml @@ -18,7 +18,7 @@ spec: serviceAccountName: postgres-operator-ui containers: - name: "service" - image: ghcr.io/zalando/postgres-operator-ui:v1.13.0 + image: ghcr.io/zalando/postgres-operator-ui:v1.14.0 ports: - containerPort: 8081 protocol: "TCP" @@ -73,11 +73,11 @@ spec: "limit_iops": 16000, "limit_throughput": 1000, "postgresql_versions": [ + "17", "16", "15", "14", - "13", - "12" + "13" ] } # Exemple of settings to make snapshot view working in the ui when using AWS diff --git a/ui/operator_ui/main.py b/ui/operator_ui/main.py index ba544750f..e02c2995c 100644 --- a/ui/operator_ui/main.py +++ b/ui/operator_ui/main.py @@ -267,7 +267,7 @@ DEFAULT_UI_CONFIG = { 'users_visible': True, 'databases_visible': True, 'resources_visible': RESOURCES_VISIBLE, - 'postgresql_versions': ['12', '13', '14', '15', '16'], + 'postgresql_versions': ['13', '14', '15', '16', '17'], 'dns_format_string': '{0}.{1}', 'pgui_link': '', 'static_network_whitelist': {}, diff --git a/ui/operator_ui/spiloutils.py b/ui/operator_ui/spiloutils.py index 9de072fca..f715430a1 100644 --- a/ui/operator_ui/spiloutils.py +++ b/ui/operator_ui/spiloutils.py @@ -305,7 +305,7 @@ def read_versions( if uid == 'wal' or defaulting(lambda: UUID(uid)) ] -BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/'] +BACKUP_VERSION_PREFIXES = ['', '10/', '11/', '12/', '13/', '14/', '15/', '16/', '17/'] def read_basebackups( pg_cluster, diff --git a/ui/run_local.sh b/ui/run_local.sh index 77f4da760..37f8b1747 100755 --- a/ui/run_local.sh +++ b/ui/run_local.sh @@ -31,11 +31,11 @@ default_operator_ui_config='{ "limit_iops": 16000, 
"limit_throughput": 1000, "postgresql_versions": [ + "17", "16", "15", "14", - "13", - "12" + "13" ], "static_network_whitelist": { "localhost": ["172.0.0.1/32"]